author     Dimitry Andric <dim@FreeBSD.org>  2022-07-27 20:11:54 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2023-02-08 19:04:48 +0000
commit     972a253a57b6f144b0e4a3e2080a2a0076ec55a0 (patch)
tree       a8aeeb0997a0a52500f1fa0644244206cf71df94
parent     fcaf7f8644a9988098ac6be2165bce3ea4786e91 (diff)
parent     08e8dd7b9db7bb4a9de26d44c1cbfd24e869c014 (diff)
Merge llvm-project main llvmorg-15-init-17826-g1f8ae9d7e7e4
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-15-init-17826-g1f8ae9d7e7e4, the last commit before the
upstream release/16.x branch was created.

PR:             265425
MFC after:      2 weeks
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFG.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h72
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h59
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td103
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Driver.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.td4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/Utils.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/Parser.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Overload.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Scope.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Sema.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Template.h34
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h100
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp52
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Edit/EditedSource.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp159
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdatomic.h3
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h1
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp594
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp52
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Scope.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp395
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp100
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp124
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp88
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp417
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGen.cpp6
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/int_types.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp63
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h40
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp44
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h10
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy.h16
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h42
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/equal_range.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/includes.h19
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/is_heap.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h4
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_copy_backward.h7
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_generate.h24
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_generate_n.h14
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_includes.h8
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap.h23
-rw-r--r--contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap_until.h20
-rw-r--r--contrib/llvm-project/libcxx/include/__assert6
-rw-r--r--contrib/llvm-project/libcxx/include/__concepts/arithmetic.h2
-rw-r--r--contrib/llvm-project/libcxx/include/__format/formatter_integer.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__functional/invoke.h11
-rw-r--r--contrib/llvm-project/libcxx/include/__hash_table1
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/incrementable_traits.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/iterator_traits.h6
-rw-r--r--contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h184
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/swap_allocator.h53
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/temporary_buffer.h1
-rw-r--r--contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h144
-rw-r--r--contrib/llvm-project/libcxx/include/__split_buffer1
-rw-r--r--contrib/llvm-project/libcxx/include/__tree1
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/aligned_storage.h142
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/aligned_union.h55
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/common_reference.h188
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/common_type.h138
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/copy_cv.h54
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/copy_cvref.h46
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/is_nothrow_convertible.h53
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/is_primary_template.h34
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/is_signed_integer.h33
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/is_unsigned_integer.h33
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/is_valid_expansion.h31
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/lazy.h25
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/make_32_64_or_128_bit.h48
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/make_signed.h76
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/make_unsigned.h89
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/nat.h32
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/promote.h95
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/remove_cvref.h41
-rw-r--r--contrib/llvm-project/libcxx/include/__type_traits/type_list.h44
-rw-r--r--contrib/llvm-project/libcxx/include/__utility/transaction.h5
-rw-r--r--contrib/llvm-project/libcxx/include/algorithm33
-rw-r--r--contrib/llvm-project/libcxx/include/charconv1
-rw-r--r--contrib/llvm-project/libcxx/include/forward_list1
-rw-r--r--contrib/llvm-project/libcxx/include/list1
-rw-r--r--contrib/llvm-project/libcxx/include/math.h1
-rw-r--r--contrib/llvm-project/libcxx/include/memory118
-rw-r--r--contrib/llvm-project/libcxx/include/module.modulemap.in20
-rw-r--r--contrib/llvm-project/libcxx/include/string1
-rw-r--r--contrib/llvm-project/libcxx/include/type_traits787
-rw-r--r--contrib/llvm-project/libcxx/include/vector19
-rw-r--r--contrib/llvm-project/libcxx/src/assert.cpp47
-rw-r--r--contrib/llvm-project/lld/ELF/Driver.cpp15
-rw-r--r--contrib/llvm-project/lld/ELF/DriverUtils.cpp20
-rw-r--r--contrib/llvm-project/lld/ELF/InputFiles.cpp4
-rw-r--r--contrib/llvm-project/lld/ELF/LinkerScript.cpp12
-rw-r--r--contrib/llvm-project/lld/ELF/Options.td14
-rw-r--r--contrib/llvm-project/lld/ELF/ScriptParser.cpp7
-rw-r--r--contrib/llvm-project/lld/ELF/SyntheticSections.cpp12
-rw-r--r--contrib/llvm-project/lld/ELF/Writer.cpp30
-rw-r--r--contrib/llvm-project/lld/MachO/Driver.cpp20
-rw-r--r--contrib/llvm-project/lld/MachO/DriverUtils.cpp1
-rw-r--r--contrib/llvm-project/lld/MachO/InputFiles.cpp60
-rw-r--r--contrib/llvm-project/lld/MachO/InputFiles.h12
-rw-r--r--contrib/llvm-project/lld/MachO/InputSection.cpp2
-rw-r--r--contrib/llvm-project/lld/MachO/Options.td10
-rw-r--r--contrib/llvm-project/lld/MachO/SyntheticSections.h6
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h8
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h15
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/Process.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/TraceCursor.h36
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Target/TraceDumper.h7
-rw-r--r--contrib/llvm-project/lldb/include/lldb/Utility/TraceIntelPTGDBRemotePackets.h4
-rw-r--r--contrib/llvm-project/lldb/include/lldb/lldb-enumerations.h8
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Commands/Options.td10
-rw-r--r--contrib/llvm-project/lldb/source/Core/Disassembler.cpp385
-rw-r--r--contrib/llvm-project/lldb/source/Host/common/Host.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp331
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp10
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp5
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.cpp85
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.h7
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp65
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterContextDarwin_arm64.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/Utility/ThreadMemory.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp72
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h14
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp18
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp68
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp3
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbUtil.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.cpp198
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.h180
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.cpp57
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.h10
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.cpp19
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.cpp89
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.h46
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp50
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h9
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.cpp26
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.h6
-rw-r--r--contrib/llvm-project/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Symbol/Type.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Target/Process.cpp15
-rw-r--r--contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Target/StackFrame.cpp42
-rw-r--r--contrib/llvm-project/lldb/source/Target/ThreadPlanCallFunction.cpp10
-rw-r--r--contrib/llvm-project/lldb/source/Target/ThreadPlanTracer.cpp4
-rw-r--r--contrib/llvm-project/lldb/source/Target/TraceCursor.cpp2
-rw-r--r--contrib/llvm-project/lldb/source/Target/TraceDumper.cpp58
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/DenseMap.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ADT/Optional.h7
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h23
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h22
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/LiveIntervals.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h1
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h23
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Instructions.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h11
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/IR/PrintPasses.h15
-rw-r--r--contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/MC/MCDisassembler/MCDisassembler.h23
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h10
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h37
-rw-r--r--contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h45
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/DXILOperationCommon.h63
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/Error.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h2
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/IPO/WholeProgramDevirt.h4
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MatrixUtils.h47
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h6
-rw-r--r--contrib/llvm-project/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp26
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp43
-rw-r--r--contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WasmException.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/AtomicExpandPass.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp10
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/LiveRangeEdit.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/MachineFunctionPass.cpp29
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/ProcessImplicitDefs.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp19
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp90
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp41
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h2
-rw-r--r--contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp17
-rw-r--r--contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/DWP/DWP.cpp7
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp124
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h10
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp179
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h61
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp38
-rw-r--r--contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/IR/Instructions.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp33
-rw-r--r--contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp44
-rw-r--r--contrib/llvm-project/llvm/lib/LTO/LTO.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/LTO/LTOCodeGenerator.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/LTO/ThinLTOCodeGenerator.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/MC/ELFObjectWriter.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp5
-rw-r--r--contrib/llvm-project/llvm/lib/MC/XCOFFObjectWriter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.cpp52
-rw-r--r--contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.h14
-rw-r--r--contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp58
-rw-r--r--contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp144
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td32
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td12
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp212
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMIRFormatter.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp826
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h233
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp16
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h8
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td56
-rw-r--r--contrib/llvm-project/llvm/lib/Target/AMDGPU/VOPCInstructions.td2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.cpp324
-rw-r--r--contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.h46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpLowering.cpp167
-rw-r--r--contrib/llvm-project/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp46
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArch.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchSubtarget.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp15
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp53
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h1
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h21
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td56
-rw-r--r--contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h3
-rw-r--r--contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h34
-rw-r--r--contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp30
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp84
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp55
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp6
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp28
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp3
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/LowerTypeTests.cpp61
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp12
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/SCCP.cpp2
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp35
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h7
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp4
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp8
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp25
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Scalar/Reassociate.cpp24
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp59
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/MatrixUtils.cpp42
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp137
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp9
-rw-r--r--contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp49
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.h4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-dwarfutil/llvm-dwarfutil.cpp22
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp7
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-mca/CodeRegionGenerator.cpp4
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp27
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td11
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp152
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp3
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp15
-rw-r--r--contrib/llvm-project/llvm/tools/llvm-xray/xray-graph.cpp9
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/DXILEmitter.cpp107
-rw-r--r--contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicate.h6
-rw-r--r--lib/clang/include/VCSVersion.inc6
-rw-r--r--lib/clang/include/clang/Config/config.h2
-rw-r--r--lib/clang/include/lld/Common/Version.inc2
-rw-r--r--lib/clang/include/lldb/Version/Version.inc4
-rw-r--r--lib/clang/include/llvm/Config/config.h4
-rw-r--r--lib/clang/include/llvm/Config/llvm-config.h2
-rw-r--r--lib/clang/include/llvm/Support/VCSRevision.h2
-rw-r--r--lib/clang/libclang/Makefile9
-rw-r--r--lib/clang/liblldb/LLDBWrapLua.cpp2
-rw-r--r--lib/libc++/Makefile20
438 files changed, 9684 insertions, 4700 deletions
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
index d8e7e1e43d81..4f16a6361950 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
@@ -202,7 +202,8 @@ public:
isa<ReturnedValueConstructionContext>(C) ||
isa<VariableConstructionContext>(C) ||
isa<ConstructorInitializerConstructionContext>(C) ||
- isa<ArgumentConstructionContext>(C)));
+ isa<ArgumentConstructionContext>(C) ||
+ isa<LambdaCaptureConstructionContext>(C)));
Data2.setPointer(const_cast<ConstructionContext *>(C));
}
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
index a437160e0778..67a091199b91 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
@@ -36,13 +36,14 @@ public:
ElidedDestructorKind,
ElidableConstructorKind,
ArgumentKind,
- STATEMENT_WITH_INDEX_KIND_BEGIN=ArgumentKind,
- STATEMENT_WITH_INDEX_KIND_END=ArgumentKind,
+ LambdaCaptureKind,
+ STATEMENT_WITH_INDEX_KIND_BEGIN = ArgumentKind,
+ STATEMENT_WITH_INDEX_KIND_END = LambdaCaptureKind,
STATEMENT_KIND_BEGIN = VariableKind,
- STATEMENT_KIND_END = ArgumentKind,
+ STATEMENT_KIND_END = LambdaCaptureKind,
InitializerKind,
- INITIALIZER_KIND_BEGIN=InitializerKind,
- INITIALIZER_KIND_END=InitializerKind
+ INITIALIZER_KIND_BEGIN = InitializerKind,
+ INITIALIZER_KIND_END = InitializerKind
};
LLVM_DUMP_METHOD static StringRef getKindAsString(ItemKind K) {
@@ -55,6 +56,8 @@ public:
case ElidedDestructorKind: return "elide destructor";
case ElidableConstructorKind: return "elide constructor";
case ArgumentKind: return "construct into argument";
+ case LambdaCaptureKind:
+ return "construct into lambda captured variable";
case InitializerKind: return "construct into member variable";
};
llvm_unreachable("Unknown ItemKind");
@@ -72,7 +75,7 @@ private:
bool hasIndex() const {
return Kind >= STATEMENT_WITH_INDEX_KIND_BEGIN &&
- Kind >= STATEMENT_WITH_INDEX_KIND_END;
+ Kind <= STATEMENT_WITH_INDEX_KIND_END;
}
bool hasInitializer() const {
@@ -127,6 +130,9 @@ public:
ConstructionContextItem(const CXXCtorInitializer *Init)
: Data(Init), Kind(InitializerKind), Index(0) {}
+ ConstructionContextItem(const LambdaExpr *LE, unsigned Index)
+ : Data(LE), Kind(LambdaCaptureKind), Index(Index) {}
+
ItemKind getKind() const { return Kind; }
LLVM_DUMP_METHOD StringRef getKindAsString() const {
@@ -254,7 +260,8 @@ public:
CXX17ElidedCopyReturnedValueKind,
RETURNED_VALUE_BEGIN = SimpleReturnedValueKind,
RETURNED_VALUE_END = CXX17ElidedCopyReturnedValueKind,
- ArgumentKind
+ ArgumentKind,
+ LambdaCaptureKind
};
protected:
@@ -298,6 +305,11 @@ public:
const ConstructionContextLayer *TopLayer);
Kind getKind() const { return K; }
+
+ virtual const ArrayInitLoopExpr *getArrayInitLoop() const { return nullptr; }
+
+ // Only declared to silence -Wnon-virtual-dtor warnings.
+ virtual ~ConstructionContext() = default;
};
/// An abstract base class for local variable constructors.
@@ -314,6 +326,12 @@ protected:
public:
const DeclStmt *getDeclStmt() const { return DS; }
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ const auto *Var = cast<VarDecl>(DS->getSingleDecl());
+
+ return dyn_cast<ArrayInitLoopExpr>(Var->getInit());
+ }
+
static bool classof(const ConstructionContext *CC) {
return CC->getKind() >= VARIABLE_BEGIN &&
CC->getKind() <= VARIABLE_END;
@@ -381,6 +399,10 @@ protected:
public:
const CXXCtorInitializer *getCXXCtorInitializer() const { return I; }
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ return dyn_cast<ArrayInitLoopExpr>(I->getInit());
+ }
+
static bool classof(const ConstructionContext *CC) {
return CC->getKind() >= INITIALIZER_BEGIN &&
CC->getKind() <= INITIALIZER_END;
@@ -659,6 +681,42 @@ public:
}
};
+class LambdaCaptureConstructionContext : public ConstructionContext {
+ // The lambda of which the initializer we capture.
+ const LambdaExpr *LE;
+
+ // Index of the captured element in the captured list.
+ unsigned Index;
+
+ friend class ConstructionContext; // Allows to create<>() itself.
+
+ explicit LambdaCaptureConstructionContext(const LambdaExpr *LE,
+ unsigned Index)
+ : ConstructionContext(LambdaCaptureKind), LE(LE), Index(Index) {}
+
+public:
+ const LambdaExpr *getLambdaExpr() const { return LE; }
+ unsigned getIndex() const { return Index; }
+
+ const Expr *getInitializer() const {
+ return *(LE->capture_init_begin() + Index);
+ }
+
+ const FieldDecl *getFieldDecl() const {
+ auto It = LE->getLambdaClass()->field_begin();
+ std::advance(It, Index);
+ return *It;
+ }
+
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ return dyn_cast_or_null<ArrayInitLoopExpr>(getInitializer());
+ }
+
+ static bool classof(const ConstructionContext *CC) {
+ return CC->getKind() == LambdaCaptureKind;
+ }
+};
+
} // end namespace clang
#endif // LLVM_CLANG_ANALYSIS_CONSTRUCTIONCONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
index abc3183e1b0b..b3e725ad3f6a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -340,6 +340,10 @@ private:
llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, DisjunctionValue *>
DisjunctionVals;
llvm::DenseMap<BoolValue *, NegationValue *> NegationVals;
+ llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, ImplicationValue *>
+ ImplicationVals;
+ llvm::DenseMap<std::pair<BoolValue *, BoolValue *>, BiconditionalValue *>
+ BiconditionalVals;
// Flow conditions are tracked symbolically: each unique flow condition is
// associated with a fresh symbolic variable (token), bound to the clause that
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
index f17df36f6a4a..2e9c088d0e5c 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -128,6 +128,21 @@ public:
/// with a symbolic representation of the `this` pointee.
Environment(DataflowAnalysisContext &DACtx, const DeclContext &DeclCtx);
+ /// Creates and returns an environment to use for an inline analysis of the
+ /// callee. Uses the storage location from each argument in the `Call` as the
+ /// storage location for the corresponding parameter in the callee.
+ ///
+ /// Requirements:
+ ///
+ /// The callee of `Call` must be a `FunctionDecl` with a body.
+ ///
+ /// The body of the callee must not reference globals.
+ ///
+ /// The arguments of `Call` must map 1:1 to the callee's parameters.
+ ///
+ /// Each argument of `Call` must already have a `StorageLocation`.
+ Environment pushCall(const CallExpr *Call) const;
+
/// Returns true if and only if the environment is equivalent to `Other`, i.e
/// the two environments:
/// - have the same mappings from declarations to storage locations,
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
index 014cd60841ee..16b0c978779a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
@@ -54,10 +54,13 @@ public:
// The `bottom` element is the empty map.
static MapLattice bottom() { return MapLattice(); }
- void insert(const std::pair<const key_type, mapped_type> &P) { C.insert(P); }
+ std::pair<iterator, bool>
+ insert(const std::pair<const key_type, mapped_type> &P) {
+ return C.insert(P);
+ }
- void insert(std::pair<const key_type, mapped_type> &&P) {
- C.insert(std::move(P));
+ std::pair<iterator, bool> insert(std::pair<const key_type, mapped_type> &&P) {
+ return C.insert(std::move(P));
}
unsigned size() const { return C.size(); }
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
index 25afa01f307c..cbb625487c1e 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
@@ -20,6 +20,12 @@
namespace clang {
namespace dataflow {
+struct TransferOptions {
+ /// Determines whether to analyze function bodies when present in the
+ /// translation unit.
+ bool ContextSensitive = false;
+};
+
/// Maps statements to the environments of basic blocks that contain them.
class StmtToEnvMap {
public:
@@ -36,7 +42,8 @@ public:
/// Requirements:
///
/// `S` must not be `ParenExpr` or `ExprWithCleanups`.
-void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env);
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env,
+ TransferOptions Options);
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
index b043062459e4..92700f164e7b 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
@@ -23,6 +23,7 @@
#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/Transfer.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Error.h"
@@ -36,6 +37,9 @@ struct DataflowAnalysisOptions {
// (at which point the built-in transfer functions can be simply a standalone
// analysis).
bool ApplyBuiltinTransfer = true;
+
+ /// Only has an effect if `ApplyBuiltinTransfer` is true.
+ TransferOptions BuiltinTransferOptions;
};
/// Type-erased lattice element container.
@@ -57,7 +61,7 @@ public:
/// Deprecated. Use the `DataflowAnalysisOptions` constructor instead.
TypeErasedDataflowAnalysis(bool ApplyBuiltinTransfer)
- : Options({ApplyBuiltinTransfer}) {}
+ : Options({ApplyBuiltinTransfer, TransferOptions{}}) {}
TypeErasedDataflowAnalysis(DataflowAnalysisOptions Options)
: Options(Options) {}
@@ -90,6 +94,11 @@ public:
/// Determines whether to apply the built-in transfer functions, which model
/// the heap and stack in the `Environment`.
bool applyBuiltinTransfer() const { return Options.ApplyBuiltinTransfer; }
+
+ /// Returns the options to be passed to the built-in transfer functions.
+ TransferOptions builtinTransferOptions() const {
+ return Options.BuiltinTransferOptions;
+ }
};
/// Type-erased model of the program at a given program point.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
index 70348f874543..c63799fe6a46 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
@@ -37,12 +37,13 @@ public:
Pointer,
Struct,
- // Synthetic boolean values are either atomic values or composites that
- // represent conjunctions, disjunctions, and negations.
+ // Synthetic boolean values are either atomic values or logical connectives.
AtomicBool,
Conjunction,
Disjunction,
- Negation
+ Negation,
+ Implication,
+ Biconditional,
};
explicit Value(Kind ValKind) : ValKind(ValKind) {}
@@ -84,7 +85,9 @@ public:
return Val->getKind() == Kind::AtomicBool ||
Val->getKind() == Kind::Conjunction ||
Val->getKind() == Kind::Disjunction ||
- Val->getKind() == Kind::Negation;
+ Val->getKind() == Kind::Negation ||
+ Val->getKind() == Kind::Implication ||
+ Val->getKind() == Kind::Biconditional;
}
};
@@ -162,6 +165,54 @@ private:
BoolValue &SubVal;
};
+/// Models a boolean implication.
+///
+/// Equivalent to `!LHS v RHS`.
+class ImplicationValue : public BoolValue {
+public:
+ explicit ImplicationValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
+ : BoolValue(Kind::Implication), LeftSubVal(LeftSubVal),
+ RightSubVal(RightSubVal) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Implication;
+ }
+
+ /// Returns the left sub-value of the implication.
+ BoolValue &getLeftSubValue() const { return LeftSubVal; }
+
+ /// Returns the right sub-value of the implication.
+ BoolValue &getRightSubValue() const { return RightSubVal; }
+
+private:
+ BoolValue &LeftSubVal;
+ BoolValue &RightSubVal;
+};
+
+/// Models a boolean biconditional.
+///
+/// Equivalent to `(LHS ^ RHS) v (!LHS ^ !RHS)`.
+class BiconditionalValue : public BoolValue {
+public:
+ explicit BiconditionalValue(BoolValue &LeftSubVal, BoolValue &RightSubVal)
+ : BoolValue(Kind::Biconditional), LeftSubVal(LeftSubVal),
+ RightSubVal(RightSubVal) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Biconditional;
+ }
+
+ /// Returns the left sub-value of the biconditional.
+ BoolValue &getLeftSubValue() const { return LeftSubVal; }
+
+ /// Returns the right sub-value of the biconditional.
+ BoolValue &getRightSubValue() const { return RightSubVal; }
+
+private:
+ BoolValue &LeftSubVal;
+ BoolValue &RightSubVal;
+};
+
/// Models an integer.
class IntegerValue : public Value {
public:
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index aff0dbbdd94d..5c84e2fc5b77 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -5040,6 +5040,12 @@ general this requires the template to be declared at least twice. For example:
clang::preferred_name(wstring)]] basic_string {
// ...
};
+
+
+Note that the ``preferred_name`` attribute will be ignored when the compiler
+writes a C++20 Module interface now. This is due to a compiler issue
+(https://github.com/llvm/llvm-project/issues/56490) that blocks users to modularize
+declarations with `preferred_name`. This is intended to be fixed in the future.
}];
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
index 709d5e1dc80d..91b180f8004d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
@@ -45,18 +45,18 @@ namespace clang {
// Start position for diagnostics.
enum {
DIAG_START_COMMON = 0,
- DIAG_START_DRIVER = DIAG_START_COMMON + DIAG_SIZE_COMMON,
- DIAG_START_FRONTEND = DIAG_START_DRIVER + DIAG_SIZE_DRIVER,
- DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + DIAG_SIZE_FRONTEND,
- DIAG_START_LEX = DIAG_START_SERIALIZATION + DIAG_SIZE_SERIALIZATION,
- DIAG_START_PARSE = DIAG_START_LEX + DIAG_SIZE_LEX,
- DIAG_START_AST = DIAG_START_PARSE + DIAG_SIZE_PARSE,
- DIAG_START_COMMENT = DIAG_START_AST + DIAG_SIZE_AST,
- DIAG_START_CROSSTU = DIAG_START_COMMENT + DIAG_SIZE_COMMENT,
- DIAG_START_SEMA = DIAG_START_CROSSTU + DIAG_SIZE_CROSSTU,
- DIAG_START_ANALYSIS = DIAG_START_SEMA + DIAG_SIZE_SEMA,
- DIAG_START_REFACTORING = DIAG_START_ANALYSIS + DIAG_SIZE_ANALYSIS,
- DIAG_UPPER_LIMIT = DIAG_START_REFACTORING + DIAG_SIZE_REFACTORING
+ DIAG_START_DRIVER = DIAG_START_COMMON + static_cast<int>(DIAG_SIZE_COMMON),
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + static_cast<int>(DIAG_SIZE_DRIVER),
+ DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + static_cast<int>(DIAG_SIZE_FRONTEND),
+ DIAG_START_LEX = DIAG_START_SERIALIZATION + static_cast<int>(DIAG_SIZE_SERIALIZATION),
+ DIAG_START_PARSE = DIAG_START_LEX + static_cast<int>(DIAG_SIZE_LEX),
+ DIAG_START_AST = DIAG_START_PARSE + static_cast<int>(DIAG_SIZE_PARSE),
+ DIAG_START_COMMENT = DIAG_START_AST + static_cast<int>(DIAG_SIZE_AST),
+ DIAG_START_CROSSTU = DIAG_START_COMMENT + static_cast<int>(DIAG_SIZE_COMMENT),
+ DIAG_START_SEMA = DIAG_START_CROSSTU + static_cast<int>(DIAG_SIZE_CROSSTU),
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + static_cast<int>(DIAG_SIZE_SEMA),
+ DIAG_START_REFACTORING = DIAG_START_ANALYSIS + static_cast<int>(DIAG_SIZE_ANALYSIS),
+ DIAG_UPPER_LIMIT = DIAG_START_REFACTORING + static_cast<int>(DIAG_SIZE_REFACTORING)
};
class CustomDiagInfo;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index 352a050ba5cf..18adb21e2be0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -282,7 +282,7 @@ def err_inline_nested_namespace_definition : Error<
def err_expected_semi_after_attribute_list : Error<
"expected ';' after attribute list">;
def err_expected_semi_after_static_assert : Error<
- "expected ';' after static_assert">;
+ "expected ';' after '%0'">;
def err_expected_semi_for : Error<"expected ';' in 'for' statement specifier">;
def err_single_decl_assign_in_for_range : Error<
"range-based 'for' statement uses ':', not '='">;
@@ -425,7 +425,7 @@ def err_unexpected_token_in_nested_name_spec : Error<
def err_bool_redeclaration : Error<
"redeclaration of C++ built-in type 'bool'">;
def warn_cxx98_compat_static_assert : Warning<
- "static_assert declarations are incompatible with C++98">,
+ "'static_assert' declarations are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def ext_ms_static_assert : ExtWarn<
"use of 'static_assert' without inclusion of <assert.h> is a Microsoft "
@@ -538,6 +538,8 @@ def err_invalid_operator_on_type : Error<
"cannot use %select{dot|arrow}0 operator on a type">;
def err_expected_unqualified_id : Error<
"expected %select{identifier|unqualified-id}0">;
+def err_while_loop_outside_of_a_function : Error<
+ "while loop outside of a function">;
def err_brackets_go_after_unqualified_id : Error<
"brackets are not allowed here; to declare an array, "
"place the brackets after the %select{identifier|name}0">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 756102720049..6ff5b8de57fd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -1526,12 +1526,12 @@ def err_messaging_class_with_direct_method : Error<
// C++ declarations
def err_static_assert_expression_is_not_constant : Error<
- "static_assert expression is not an integral constant expression">;
+ "static assertion expression is not an integral constant expression">;
def err_constexpr_if_condition_expression_is_not_constant : Error<
"constexpr if condition is not a constant expression">;
-def err_static_assert_failed : Error<"static_assert failed%select{: %1|}0">;
+def err_static_assert_failed : Error<"static assertion failed%select{: %1|}0">;
def err_static_assert_requirement_failed : Error<
- "static_assert failed due to requirement '%0'%select{: %2|}1">;
+ "static assertion failed due to requirement '%0'%select{: %2|}1">;
def warn_consteval_if_always_true : Warning<
"consteval if is always true in an %select{unevaluated|immediate}0 context">,
@@ -5774,6 +5774,8 @@ def warn_forward_class_redefinition : Warning<
def err_redefinition_different_typedef : Error<
"%select{typedef|type alias|type alias template}0 "
"redefinition with different types%diff{ ($ vs $)|}1,2">;
+def err_redefinition_different_concept : Error<
+ "redefinition of concept %0 with different template parameters or requirements">;
def err_tag_reference_non_tag : Error<
"%select{non-struct type|non-class type|non-union type|non-enum "
"type|typedef|type alias|template|type alias template|template "
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
index 6fb31c5655ab..ad366821f3cb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
@@ -195,6 +195,7 @@ VALUE_LANGOPT(DoubleSize , 32, 0, "width of double")
VALUE_LANGOPT(LongDoubleSize , 32, 0, "width of long double")
LANGOPT(PPCIEEELongDouble , 1, 0, "use IEEE 754 quadruple-precision for long double")
LANGOPT(EnableAIXExtendedAltivecABI , 1, 0, "__EXTABI__ predefined macro")
+LANGOPT(EnableAIXQuadwordAtomicsABI , 1, 0, "Use 16-byte atomic lock free semantics")
COMPATIBLE_VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
COMPATIBLE_VALUE_LANGOPT(PIE , 1, 0, "is pie")
LANGOPT(ROPI , 1, 0, "Read-only position independence")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
index 7b65a1537805..84fc0893c8b5 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
@@ -908,6 +908,9 @@ PRAGMA_ANNOTATION(pragma_fp)
// Annotation for the attribute pragma directives - #pragma clang attribute ...
PRAGMA_ANNOTATION(pragma_attribute)
+// Annotation for the riscv pragma directives - #pragma clang riscv intrinsic ...
+PRAGMA_ANNOTATION(pragma_riscv)
+
// Annotations for module import translated from #include etc.
ANNOTATION(module_include)
ANNOTATION(module_begin)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
index d96020ee40d0..6b21f48110de 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
@@ -186,7 +186,7 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// HasPolicyOperand: Has a policy operand. 0 is tail and mask undisturbed, 1 is
// tail agnostic, 2 is mask undisturbed, and 3 is tail and mask agnostic. The
// policy operand is located at the last position.
- Policy MaskedPolicy = HasPolicyOperand;
+ Policy MaskedPolicyScheme = HasPolicyOperand;
// The policy scheme for unmasked intrinsic IR.
// It could be NonePolicy, HasPassthruOperand or HasPolicyOperand.
@@ -194,7 +194,7 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
// undef, tail policy is tail agnostic, otherwise policy is tail undisturbed.
// HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
// undisturbed.
- Policy UnMaskedPolicy = NonePolicy;
+ Policy UnMaskedPolicyScheme = NonePolicy;
// This builtin supports non-masked function overloading api.
// All masked operations support overloading api.
@@ -443,7 +443,7 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
let HasMaskedOffOperand = false;
}
-let UnMaskedPolicy = HasPolicyOperand,
+let UnMaskedPolicyScheme = HasPolicyOperand,
HasMaskedOffOperand = false in {
multiclass RVVSlideBuiltinSet {
defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
@@ -582,7 +582,7 @@ class IsFloat<string type> {
}
let HasUnMaskedOverloaded = false,
- MaskedPolicy = NonePolicy in {
+ MaskedPolicyScheme = NonePolicy in {
class RVVVLEMaskBuiltin : RVVOutBuiltin<"m", "mPCUe", "c"> {
let Name = "vlm_v";
let IRName = "vlm";
@@ -591,7 +591,7 @@ let HasUnMaskedOverloaded = false,
}
let HasUnMaskedOverloaded = false,
- UnMaskedPolicy = HasPassthruOperand in {
+ UnMaskedPolicyScheme = HasPassthruOperand in {
multiclass RVVVLEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vle",
@@ -664,7 +664,7 @@ multiclass RVVVLSEBuiltin<list<string> types> {
IRName = "vlse",
MaskedIRName ="vlse_mask",
HasUnMaskedOverloaded = false,
- UnMaskedPolicy = HasPassthruOperand in {
+ UnMaskedPolicyScheme = HasPassthruOperand in {
foreach type = types in {
def : RVVOutBuiltin<"v", "vPCet", type>;
if !not(IsFloat<type>.val) then {
@@ -675,7 +675,7 @@ multiclass RVVVLSEBuiltin<list<string> types> {
}
multiclass RVVIndexedLoad<string op> {
- let UnMaskedPolicy = HasPassthruOperand in {
+ let UnMaskedPolicyScheme = HasPassthruOperand in {
foreach type = TypeList in {
foreach eew_list = EEWList[0-2] in {
defvar eew = eew_list[0];
@@ -701,7 +701,7 @@ multiclass RVVIndexedLoad<string op> {
}
let HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
// Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
std::swap(Ops[0], Ops[1]);
@@ -738,7 +738,7 @@ multiclass RVVVSSEBuiltin<list<string> types> {
IRName = "vsse",
MaskedIRName = "vsse_mask",
HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
// Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -762,7 +762,7 @@ multiclass RVVVSSEBuiltin<list<string> types> {
multiclass RVVIndexedStore<string op> {
let HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
// Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -1141,7 +1141,7 @@ multiclass RVVUnitStridedSegStore<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
// Builtin: (ptr, val0, val1, ..., vl)
@@ -1187,7 +1187,7 @@ multiclass RVVStridedSegStore<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
// Builtin: (ptr, stride, val0, val1, ..., vl).
@@ -1229,7 +1229,7 @@ multiclass RVVIndexedSegStore<string op> {
MaskedIRName = op # nf # "_mask",
NF = nf,
HasMaskedOffOperand = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
// Builtin: (ptr, index, val0, val1, ..., vl)
@@ -1568,7 +1568,7 @@ def vsetvl_macro: RVVHeader;
let HasBuiltinAlias = false,
HasVL = false,
HasMasked = false,
- MaskedPolicy = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
Log2LMUL = [0],
ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
{
@@ -1627,7 +1627,7 @@ defm : RVVIndexedSegStore<"vsoxseg">;
// 12. Vector Integer Arithmetic Instructions
// 12.1. Vector Single-Width Integer Add and Subtract
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vadd : RVVIntBinBuiltinSet;
defm vsub : RVVIntBinBuiltinSet;
defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
@@ -1638,7 +1638,7 @@ defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
// 12.2. Vector Widening Integer Add/Subtract
// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1657,7 +1657,7 @@ defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
[["w", "wv"]]>;
// 12.3. Vector Integer Extension
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1673,8 +1673,8 @@ let Log2LMUL = [-3, -2, -1, 0] in {
}
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
-let HasMasked = false, MaskedPolicy = NonePolicy in {
- let UnMaskedPolicy = HasPassthruOperand in {
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
+ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vadc : RVVCarryinBuiltinSet;
defm vsbc : RVVCarryinBuiltinSet;
}
@@ -1685,7 +1685,7 @@ let HasMasked = false, MaskedPolicy = NonePolicy in {
}
// 12.5. Vector Bitwise Logical Instructions
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vand : RVVIntBinBuiltinSet;
defm vxor : RVVIntBinBuiltinSet;
defm vor : RVVIntBinBuiltinSet;
@@ -1693,7 +1693,7 @@ defm vor : RVVIntBinBuiltinSet;
defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
// 12.6. Vector Single-Width Bit Shift Instructions
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsll : RVVShiftBuiltinSet;
defm vsrl : RVVUnsignedShiftBuiltinSet;
defm vsra : RVVSignedShiftBuiltinSet;
@@ -1707,7 +1707,7 @@ defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
["Uv", "UvUw"]]>;
// 12.8. Vector Integer Comparison Instructions
-let MaskedPolicy = NonePolicy in {
+let MaskedPolicyScheme = NonePolicy in {
defm vmseq : RVVIntMaskOutBuiltinSet;
defm vmsne : RVVIntMaskOutBuiltinSet;
defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1721,7 +1721,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet;
}
// 12.9. Vector Integer Min/Max Instructions
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vminu : RVVUnsignedBinBuiltinSet;
defm vmin : RVVSignedBinBuiltinSet;
defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1745,7 +1745,7 @@ defm vrem : RVVSignedBinBuiltinSet;
}
// 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicy = HasPassthruOperand in {
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
[["vv", "w", "wvv"],
["vx", "w", "wve"]]>;
@@ -1758,7 +1758,7 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
}
// 12.13. Vector Single-Width Integer Multiply-Add Instructions
-let UnMaskedPolicy = HasPolicyOperand in {
+let UnMaskedPolicyScheme = HasPolicyOperand in {
defm vmacc : RVVIntTerBuiltinSet;
defm vnmsac : RVVIntTerBuiltinSet;
defm vmadd : RVVIntTerBuiltinSet;
@@ -1783,7 +1783,7 @@ defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
// 12.15. Vector Integer Merge Instructions
// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicy = NonePolicy,
+let HasMasked = false, MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1798,7 +1798,9 @@ let HasMasked = false, MaskedPolicy = NonePolicy,
}
// 12.16. Vector Integer Move Instructions
-let HasMasked = false, UnMaskedPolicy = HasPassthruOperand, MaskedPolicy = NonePolicy in {
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = NonePolicy in {
let OverloadedName = "vmv_v" in {
defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
[["v", "Uv", "UvUv"]]>;
@@ -1813,7 +1815,7 @@ let HasMasked = false, UnMaskedPolicy = HasPassthruOperand, MaskedPolicy = NoneP
// 13. Vector Fixed-Point Arithmetic Instructions
// 13.1. Vector Single-Width Saturating Add and Subtract
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsaddu : RVVUnsignedBinBuiltinSet;
defm vsadd : RVVSignedBinBuiltinSet;
defm vssubu : RVVUnsignedBinBuiltinSet;
@@ -1866,7 +1868,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
}
// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
-let UnMaskedPolicy = HasPolicyOperand in {
+let UnMaskedPolicyScheme = HasPolicyOperand in {
defm vfmacc : RVVFloatingTerBuiltinSet;
defm vfnmacc : RVVFloatingTerBuiltinSet;
defm vfmsac : RVVFloatingTerBuiltinSet;
@@ -1884,7 +1886,7 @@ defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
}
// 14.8. Vector Floating-Point Square-Root Instruction
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
def vfsqrt : RVVFloatingUnaryVVBuiltin;
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -1906,7 +1908,7 @@ defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
// 14.13. Vector Floating-Point Compare Instructions
-let MaskedPolicy = NonePolicy in {
+let MaskedPolicyScheme = NonePolicy in {
defm vmfeq : RVVFloatingMaskOutBuiltinSet;
defm vmfne : RVVFloatingMaskOutBuiltinSet;
defm vmflt : RVVFloatingMaskOutBuiltinSet;
@@ -1916,12 +1918,12 @@ defm vmfge : RVVFloatingMaskOutBuiltinSet;
}
// 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", UnMaskedPolicy = HasPassthruOperand in
+let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in
def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
// 14.15. Vector Floating-Point Merge Instruction

// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMasked = false, MaskedPolicy = NonePolicy,
+let HasMasked = false, MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1935,13 +1937,13 @@ let HasMasked = false, MaskedPolicy = NonePolicy,
}
// 14.16. Vector Floating-Point Move Instruction
-let HasMasked = false, UnMaskedPolicy = HasPassthruOperand,
- HasUnMaskedOverloaded = false, MaskedPolicy = NonePolicy in
+let HasMasked = false, UnMaskedPolicyScheme = HasPassthruOperand,
+ HasUnMaskedOverloaded = false, MaskedPolicyScheme = NonePolicy in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
[["f", "v", "ve"]]>;
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
@@ -1975,7 +1977,7 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
// 15. Vector Reduction Operations
// 15.1. Vector Single-Width Integer Reduction Instructions
-let MaskedPolicy = NonePolicy in {
+let MaskedPolicyScheme = NonePolicy in {
defm vredsum : RVVIntReductionBuiltinSet;
defm vredmaxu : RVVUnsignedReductionBuiltin;
defm vredmax : RVVSignedReductionBuiltin;
@@ -2021,7 +2023,7 @@ def vmset : RVVMaskNullaryBuiltin;
defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
-let MaskedPolicy = NonePolicy in {
+let MaskedPolicyScheme = NonePolicy in {
// 16.2. Vector count population in mask vcpop.m
def vcpop : RVVMaskOp0Builtin<"um">;
@@ -2038,7 +2040,7 @@ def vmsif : RVVMaskUnaryBuiltin;
def vmsof : RVVMaskUnaryBuiltin;
}
-let UnMaskedPolicy = HasPassthruOperand, HasUnMaskedOverloaded = false in {
+let UnMaskedPolicyScheme = HasPassthruOperand, HasUnMaskedOverloaded = false in {
// 16.8. Vector Iota Instruction
defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
@@ -2049,7 +2051,7 @@ let UnMaskedPolicy = HasPassthruOperand, HasUnMaskedOverloaded = false in {
// 17. Vector Permutation Instructions
// 17.1. Integer Scalar Move Instructions
-let HasMasked = false, MaskedPolicy = NonePolicy in {
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let HasVL = false, OverloadedName = "vmv_x" in
defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
[["s", "ve", "ev"],
@@ -2061,7 +2063,7 @@ let HasMasked = false, MaskedPolicy = NonePolicy in {
}
// 17.2. Floating-Point Scalar Move Instructions
-let HasMasked = false, MaskedPolicy = NonePolicy in {
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let HasVL = false, OverloadedName = "vfmv_f" in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
[["s", "ve", "ev"]]>;
@@ -2078,7 +2080,7 @@ defm vslideup : RVVSlideBuiltinSet;
defm vslidedown : RVVSlideBuiltinSet;
// 17.3.3. Vector Slide1up Instructions
-let UnMaskedPolicy = HasPassthruOperand in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
defm vfslide1up : RVVFloatingBinVFBuiltinSet;
@@ -2104,7 +2106,7 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
}
// 17.5. Vector Compress Instruction
-let HasMasked = false, MaskedPolicy = NonePolicy,
+let HasMasked = false, MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
IntrinsicTypes = {ResultType, Ops[3]->getType()};
@@ -2119,7 +2121,7 @@ let HasMasked = false, MaskedPolicy = NonePolicy,
// Miscellaneous
let HasMasked = false, HasVL = false, IRName = "" in {
- let Name = "vreinterpret_v", MaskedPolicy = NonePolicy,
+ let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
return Builder.CreateBitCast(Ops[0], ResultType);
}] in {
@@ -2141,7 +2143,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
}
}
- let Name = "vundefined", HasUnMaskedOverloaded = false, MaskedPolicy = NonePolicy,
+ let Name = "vundefined", HasUnMaskedOverloaded = false,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
return llvm::UndefValue::get(ResultType);
}] in {
@@ -2151,7 +2154,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
// LMUL truncation
// C/C++ Operand: VecTy, IR Operand: VecTy, Index
- let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc", MaskedPolicy = NonePolicy,
+ let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{ {
ID = Intrinsic::vector_extract;
IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2169,7 +2173,8 @@ let HasMasked = false, HasVL = false, IRName = "" in {
// LMUL extension
// C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
- let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext", MaskedPolicy = NonePolicy,
+ let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
ID = Intrinsic::vector_insert;
IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2187,7 +2192,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
}
}
- let Name = "vget_v", MaskedPolicy = NonePolicy,
+ let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
ID = Intrinsic::vector_extract;
@@ -2211,7 +2216,7 @@ let HasMasked = false, HasVL = false, IRName = "" in {
}
}
- let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicy = NonePolicy,
+ let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
ID = Intrinsic::vector_insert;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h b/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
index 26587e73bf6c..edacd82bf899 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
@@ -14,12 +14,17 @@
#define LLVM_CLANG_CODEGEN_MODULEBUILDER_H
#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/LLVM.h"
namespace llvm {
class Constant;
class LLVMContext;
class Module;
class StringRef;
+
+ namespace vfs {
+ class FileSystem;
+ }
}
namespace clang {
@@ -98,10 +103,11 @@ public:
/// the allocated CodeGenerator instance.
CodeGenerator *CreateLLVMCodeGen(DiagnosticsEngine &Diags,
llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PreprocessorOpts,
const CodeGenOptions &CGO,
- llvm::LLVMContext& C,
+ llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
} // end namespace clang
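
CreateLLVMCodeGen now also takes a virtual file system handle. A minimal call sketch, assuming the embedding tool is content with the real file system and has already built the option objects (names here are illustrative):

#include "clang/CodeGen/ModuleBuilder.h"
#include "llvm/Support/VirtualFileSystem.h"

clang::CodeGenerator *makeCodeGen(clang::DiagnosticsEngine &Diags,
                                  const clang::HeaderSearchOptions &HSOpts,
                                  const clang::PreprocessorOptions &PPOpts,
                                  const clang::CodeGenOptions &CGOpts,
                                  llvm::LLVMContext &Ctx) {
  // The only new argument is the file system; everything else is unchanged.
  return clang::CreateLLVMCodeGen(Diags, "my_module",
                                  llvm::vfs::getRealFileSystem(), HSOpts,
                                  PPOpts, CGOpts, Ctx);
}
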
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Driver.h b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
index 774eac613a10..0781d476ec4a 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
@@ -44,9 +44,7 @@ typedef SmallVector<InputInfo, 4> InputInfoList;
class Command;
class Compilation;
-class JobList;
class JobAction;
-class SanitizerArgs;
class ToolChain;
/// Describes the kind of LTO mode selected via -f(no-)?lto(=.*)? options.
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index b9c2e4d528e4..3cab37b21aaf 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -3679,6 +3679,10 @@ def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">, Group<m_Group>, Flags<[
MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
def mabi_EQ_vec_default : Flag<["-"], "mabi=vec-default">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Enable the default Altivec ABI on AIX (AIX only). Uses only volatile vector registers.">;
+def mabi_EQ_quadword_atomics : Flag<["-"], "mabi=quadword-atomics">,
+ Group<m_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable quadword atomics ABI on AIX (AIX PPC64 only). Uses lqarx/stqcx. instructions.">,
+ MarshallingInfoFlag<LangOpts<"EnableAIXQuadwordAtomicsABI">>;
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
def mno_vsx : Flag<["-"], "mno-vsx">, Group<m_ppc_Features_Group>;
def msecure_plt : Flag<["-"], "msecure-plt">, Group<m_ppc_Features_Group>;
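
The help text above describes the user-visible effect of the new flag: 16-byte atomics become lock free on AIX PPC64. A rough source-level sketch of what that is expected to mean (an assumption based on the help text; the actual codegen is outside this diff):

#include <atomic>

struct Pair { long long Lo, Hi; };   // 16 bytes on LP64

std::atomic<Pair> P;

// With -mabi=quadword-atomics this is expected to return true, the operations
// being implemented with lqarx/stqcx.; without the flag a lock-based fallback
// may be used instead.
bool isLockFree() { return P.is_lock_free(); }
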
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
index 240624d5408f..143cf4359f00 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
@@ -107,10 +107,10 @@ public:
void finishedMainFile(DiagnosticsEngine &Diags) override;
- bool needSystemDependencies() final override { return IncludeSystemHeaders; }
+ bool needSystemDependencies() final { return IncludeSystemHeaders; }
bool sawDependency(StringRef Filename, bool FromModule, bool IsSystem,
- bool IsModuleFile, bool IsMissing) final override;
+ bool IsModuleFile, bool IsMissing) final;
protected:
void outputDependencyFile(llvm::raw_ostream &OS);
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
index 063929dd8f96..c97ca8628e14 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
@@ -49,7 +49,6 @@ void operator delete(void *ptr, clang::PreprocessingRecord &PR,
namespace clang {
-class FileEntry;
class IdentifierInfo;
class MacroInfo;
class SourceManager;
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index 8fc24c731035..79454b5addea 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -67,7 +67,6 @@ namespace clang {
class CodeCompletionHandler;
class CommentHandler;
class DirectoryEntry;
-class DirectoryLookup;
class EmptylineHandler;
class ExternalPreprocessorSource;
class FileEntry;
diff --git a/contrib/llvm-project/clang/include/clang/Parse/Parser.h b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
index 76e1c9db5284..41bfc9f48ecc 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
@@ -215,6 +215,7 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
+ std::unique_ptr<PragmaHandler> RISCVPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
@@ -1043,7 +1044,7 @@ private:
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
- bool ExpectAndConsumeSemi(unsigned DiagID);
+  bool ExpectAndConsumeSemi(unsigned DiagID, StringRef TokenUsed = "");
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Overload.h b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
index 48997e186ef6..fb4812675d9a 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
@@ -795,6 +795,10 @@ class Sema;
/// This candidate was not viable because its associated constraints were
/// not satisfied.
ovl_fail_constraints_not_satisfied,
+
+ /// This candidate was not viable because it has internal linkage and is
+ /// from a different module unit than the use.
+ ovl_fail_module_mismatched,
};
/// A list of implicit conversion sequences for the arguments of an
diff --git a/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
new file mode 100644
index 000000000000..505100249d6f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
@@ -0,0 +1,36 @@
+//===- RISCVIntrinsicManager.h - RISC-V Intrinsic Handler -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RISCVIntrinsicManager, which handles RISC-V vector
+// intrinsic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
+#define LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
+
+namespace clang {
+class Sema;
+class LookupResult;
+class IdentifierInfo;
+class Preprocessor;
+
+namespace sema {
+class RISCVIntrinsicManager {
+public:
+ virtual ~RISCVIntrinsicManager() = default;
+
+  // Create the RISC-V intrinsic and insert it into the symbol table; return
+  // true if it was found, otherwise return false.
+ virtual bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) = 0;
+};
+} // end namespace sema
+} // end namespace clang
+
+#endif
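
The interface is intentionally small: Sema only needs a hook to consult when ordinary lookup fails. A do-nothing stand-in, shown purely to illustrate the contract (the real manager is created via CreateRISCVIntrinsicManager and expands riscv_vector.td records on demand):

#include "clang/Sema/RISCVIntrinsicManager.h"

// Sketch only: never recognizes a name, so lookup falls back to normal rules.
class NoopRISCVIntrinsicManager : public clang::sema::RISCVIntrinsicManager {
public:
  bool CreateIntrinsicIfFound(clang::LookupResult &LR,
                              clang::IdentifierInfo *II,
                              clang::Preprocessor &PP) override {
    (void)LR; (void)II; (void)PP;
    return false;
  }
};
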
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Scope.h b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
index f4c50864f51c..3749d925b106 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
@@ -210,9 +210,19 @@ private:
/// Used to determine if errors occurred in this scope.
DiagnosticErrorTrap ErrorTrap;
- /// A lattice consisting of undefined, a single NRVO candidate variable in
- /// this scope, or over-defined. The bit is true when over-defined.
- llvm::PointerIntPair<VarDecl *, 1, bool> NRVO;
+ /// A single NRVO candidate variable in this scope.
+ /// There are three possible values:
+ /// 1) pointer to VarDecl that denotes NRVO candidate itself.
+ /// 2) nullptr value means that NRVO is not allowed in this scope
+ /// (e.g. return a function parameter).
+ /// 3) None value means that there is no NRVO candidate in this scope
+ /// (i.e. there are no return statements in this scope).
+ Optional<VarDecl *> NRVO;
+
+ /// Represents return slots for NRVO candidates in the current scope.
+ /// If a variable is present in this set, it means that a return slot is
+ /// available for this variable in the current scope.
+ llvm::SmallPtrSet<VarDecl *, 8> ReturnSlots;
void setFlags(Scope *Parent, unsigned F);
@@ -304,6 +314,10 @@ public:
bool decl_empty() const { return DeclsInScope.empty(); }
void AddDecl(Decl *D) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ if (!isa<ParmVarDecl>(VD))
+ ReturnSlots.insert(VD);
+
DeclsInScope.insert(D);
}
@@ -527,23 +541,9 @@ public:
UsingDirectives.end());
}
- void addNRVOCandidate(VarDecl *VD) {
- if (NRVO.getInt())
- return;
- if (NRVO.getPointer() == nullptr) {
- NRVO.setPointer(VD);
- return;
- }
- if (NRVO.getPointer() != VD)
- setNoNRVO();
- }
-
- void setNoNRVO() {
- NRVO.setInt(true);
- NRVO.setPointer(nullptr);
- }
+ void updateNRVOCandidate(VarDecl *VD);
- void mergeNRVOIntoParent();
+ void applyNRVO();
/// Init - This is used by the parser to implement scope caching.
void Init(Scope *parent, unsigned flags);
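
The rewritten NRVO bookkeeping distinguishes "one candidate", "not allowed", and "no candidate yet". At the source level those states correspond roughly to the following cases (a sketch with made-up names):

struct Big { int Data[64]; };

Big hasCandidate() {
  Big B{};
  return B;          // B becomes the single NRVO candidate for this scope
}

Big notAllowed(Big Param) {
  return Param;      // returning a parameter: NRVO disallowed (nullptr state)
}

Big innerScopeWithoutReturn(bool Flag) {
  Big B{};
  if (Flag) { }      // this inner scope has no return statement, so its
                     // candidate stays in the None state
  return B;
}
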
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index a33d85cc954d..06ea0b417cb3 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -226,6 +226,7 @@ namespace sema {
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
+ class RISCVIntrinsicManager;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
@@ -1587,7 +1588,12 @@ public:
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
+  /// Indicates whether RISC-V vector builtin functions are enabled.
+ bool DeclareRISCVVBuiltins = false;
+
private:
+ std::unique_ptr<sema::RISCVIntrinsicManager> RVIntrinsicManager;
+
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
bool WarnedDarwinSDKInfoMissing = false;
@@ -8260,6 +8266,9 @@ public:
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
+ void CheckConceptRedefinition(ConceptDecl *NewDecl, LookupResult &Previous,
+ bool &AddToScope);
+
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
@@ -12170,7 +12179,8 @@ public:
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
- Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
+ Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType,
+ BinaryOperatorKind Opc);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
@@ -13586,6 +13596,8 @@ void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
+std::unique_ptr<sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S);
} // end namespace clang
namespace llvm {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Template.h b/contrib/llvm-project/clang/include/clang/Sema/Template.h
index 5dcde77b5dd3..8df92b7000f3 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Template.h
@@ -75,6 +75,8 @@ enum class TemplateSubstitutionKind : char {
class MultiLevelTemplateArgumentList {
/// The template argument list at a certain template depth
using ArgList = ArrayRef<TemplateArgument>;
+ using ArgListsIterator = SmallVector<ArgList, 4>::iterator;
+ using ConstArgListsIterator = SmallVector<ArgList, 4>::const_iterator;
/// The template argument lists, stored from the innermost template
/// argument list (first) to the outermost template argument list (last).
@@ -121,6 +123,12 @@ enum class TemplateSubstitutionKind : char {
return TemplateArgumentLists.size();
}
+ // Determine the number of substituted args at 'Depth'.
+ unsigned getNumSubsitutedArgs(unsigned Depth) const {
+ assert(NumRetainedOuterLevels <= Depth && Depth < getNumLevels());
+ return TemplateArgumentLists[getNumLevels() - Depth - 1].size();
+ }
+
unsigned getNumRetainedOuterLevels() const {
return NumRetainedOuterLevels;
}
@@ -158,6 +166,14 @@ enum class TemplateSubstitutionKind : char {
return !(*this)(Depth, Index).isNull();
}
+ bool isAnyArgInstantiationDependent() const {
+ for (ArgList List : TemplateArgumentLists)
+ for (const TemplateArgument &TA : List)
+ if (TA.isInstantiationDependent())
+ return true;
+ return false;
+ }
+
/// Clear out a specific template argument.
void setArgument(unsigned Depth, unsigned Index,
TemplateArgument Arg) {
@@ -183,6 +199,14 @@ enum class TemplateSubstitutionKind : char {
TemplateArgumentLists.push_back(Args);
}
+ /// Replaces the current 'innermost' level with the provided argument list.
+ /// This is useful for type deduction cases where we need to get the entire
+ /// list from the AST, but then add the deduced innermost list.
+ void replaceInnermostTemplateArguments(ArgList Args) {
+ assert(TemplateArgumentLists.size() > 0 && "Replacing in an empty list?");
+ TemplateArgumentLists[0] = Args;
+ }
+
/// Add an outermost level that we are not substituting. We have no
/// arguments at this level, and do not remove it from the depth of inner
/// template parameters that we instantiate.
@@ -197,6 +221,16 @@ enum class TemplateSubstitutionKind : char {
const ArgList &getInnermost() const {
return TemplateArgumentLists.front();
}
+ /// Retrieve the outermost template argument list.
+ const ArgList &getOutermost() const {
+ return TemplateArgumentLists.back();
+ }
+ ArgListsIterator begin() { return TemplateArgumentLists.begin(); }
+ ConstArgListsIterator begin() const {
+ return TemplateArgumentLists.begin();
+ }
+ ArgListsIterator end() { return TemplateArgumentLists.end(); }
+ ConstArgListsIterator end() const { return TemplateArgumentLists.end(); }
};
/// The context in which partial ordering of function templates occurs.
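
With begin()/end() the per-depth argument lists become range-iterable, innermost list first as documented above. A small consumer sketch that relies only on the interface shown here:

#include "clang/Sema/Template.h"

// Counts all template arguments across every depth of the list.
unsigned countAllArgs(const clang::MultiLevelTemplateArgumentList &MLTAL) {
  unsigned N = 0;
  for (const auto &List : MLTAL)   // each List is an ArrayRef<TemplateArgument>
    N += List.size();
  return N;
}
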
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
index 6a3532d7272d..83bc7dcdfde3 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
@@ -703,6 +703,10 @@ public:
bool hasChain() const { return Chain; }
ASTReader *getChain() const { return Chain; }
+ bool isWritingNamedModules() const {
+ return WritingModule && WritingModule->isModulePurview();
+ }
+
private:
// ASTDeserializationListener implementation
void ReaderInitialized(ASTReader *Reader) override;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
index 685dc66182ef..48da56c00f7a 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
@@ -731,7 +731,7 @@ public:
PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
BugReporterContext &BR,
- PathSensitiveBugReport &R) override final;
+ PathSensitiveBugReport &R) final;
};
} // namespace ento
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 116a5970c341..8773e171369f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -622,6 +622,11 @@ public:
getIndexOfElementToConstruct(ProgramStateRef State, const CXXConstructExpr *E,
const LocationContext *LCtx);
+  /// Retrieves the size of the array in the pending ArrayInitLoopExpr.
+ static Optional<unsigned> getPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
/// By looking at a certain item that may be potentially part of an object's
/// ConstructionContext, retrieve such object's location. A particular
/// statement can be transparently passed as \p Item in most cases.
@@ -816,7 +821,9 @@ private:
/// Checks whether our policies allow us to inline a non-POD type array
/// construction.
- bool shouldInlineArrayConstruction(const ArrayType *Type);
+ bool shouldInlineArrayConstruction(const ProgramStateRef State,
+ const CXXConstructExpr *CE,
+ const LocationContext *LCtx);
/// Checks whether we construct an array of non-POD type, and decides if the
/// constructor should be invoked once again.
@@ -916,6 +923,16 @@ private:
const CXXConstructExpr *E,
const LocationContext *LCtx);
+ /// Sets the size of the array in a pending ArrayInitLoopExpr.
+ static ProgramStateRef setPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx,
+ unsigned Idx);
+
+ static ProgramStateRef removePendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
/// Store the location of a C++ object corresponding to a statement
/// until the statement is actually encountered. For example, if a DeclStmt
/// has CXXConstructExpr as its initializer, the object would be considered
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 1092d1292255..9927b6340793 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -55,8 +55,6 @@ template <typename T> struct ProgramStateTrait {
}
};
-class RangeSet;
-
/// \class ProgramState
/// ProgramState - This class encapsulates:
///
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index c9c21fcf230e..2ae811ee3365 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -40,7 +40,6 @@ class LabelDecl;
namespace ento {
-class BasicValueFactory;
class CompoundValData;
class LazyCompoundValData;
class MemRegion;
diff --git a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index a5e7e6d35cc8..7ee4896eea09 100644
--- a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -18,6 +18,10 @@
#include <string>
#include <vector>
+namespace llvm {
+class raw_ostream;
+} // end namespace llvm
+
namespace clang {
namespace RISCV {
@@ -104,12 +108,14 @@ struct PrototypeDescriptor {
uint8_t TM = static_cast<uint8_t>(TypeModifier::NoModifier);
bool operator!=(const PrototypeDescriptor &PD) const {
- return PD.PT != PT || PD.VTM != VTM || PD.TM != TM;
+ return !(*this == PD);
}
- bool operator>(const PrototypeDescriptor &PD) const {
- return !(PD.PT <= PT && PD.VTM <= VTM && PD.TM <= TM);
+ bool operator==(const PrototypeDescriptor &PD) const {
+ return PD.PT == PT && PD.VTM == VTM && PD.TM == TM;
+ }
+ bool operator<(const PrototypeDescriptor &PD) const {
+ return std::tie(PT, VTM, TM) < std::tie(PD.PT, PD.VTM, PD.TM);
}
-
static const PrototypeDescriptor Mask;
static const PrototypeDescriptor Vector;
static const PrototypeDescriptor VL;
@@ -224,8 +230,12 @@ public:
bool isFloat(unsigned Width) const {
return isFloat() && ElementBitwidth == Width;
}
-
+ bool isConstant() const { return IsConstant; }
bool isPointer() const { return IsPointer; }
+ unsigned getElementBitwidth() const { return ElementBitwidth; }
+
+ ScalarTypeKind getScalarType() const { return ScalarType; }
+ VScaleVal getScale() const { return Scale; }
private:
// Verify RVV vector type and set Valid.
@@ -263,18 +273,6 @@ public:
PrototypeDescriptor Proto);
};
-using RISCVPredefinedMacroT = uint8_t;
-
-enum RISCVPredefinedMacro : RISCVPredefinedMacroT {
- Basic = 0,
- V = 1 << 1,
- Zvfh = 1 << 2,
- RV64 = 1 << 3,
- VectorMaxELen64 = 1 << 4,
- VectorMaxELenFp32 = 1 << 5,
- VectorMaxELenFp64 = 1 << 6,
-};
-
enum PolicyScheme : uint8_t {
SchemeNone,
HasPassthruOperand,
@@ -302,7 +300,6 @@ private:
// The types we use to obtain the specific LLVM intrinsic. They are index of
// InputTypes. -1 means the return type.
std::vector<int64_t> IntrinsicTypes;
- RISCVPredefinedMacroT RISCVPredefinedMacros = 0;
unsigned NF = 1;
public:
@@ -333,9 +330,6 @@ public:
llvm::StringRef getIRName() const { return IRName; }
llvm::StringRef getManualCodegen() const { return ManualCodegen; }
PolicyScheme getPolicyScheme() const { return Scheme; }
- RISCVPredefinedMacroT getRISCVPredefinedMacros() const {
- return RISCVPredefinedMacros;
- }
unsigned getNF() const { return NF; }
const std::vector<int64_t> &getIntrinsicTypes() const {
return IntrinsicTypes;
@@ -347,8 +341,72 @@ public:
static std::string
getSuffixStr(BasicType Type, int Log2LMUL,
llvm::ArrayRef<PrototypeDescriptor> PrototypeDescriptors);
+
+ static llvm::SmallVector<PrototypeDescriptor>
+ computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
+ bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
+ unsigned NF);
+};
+
+// RVVRequire should be kept in sync with the target features, but it only
+// covers the required features that are used in riscv_vector.td.
+enum RVVRequire : uint8_t {
+ RVV_REQ_None = 0,
+ RVV_REQ_RV64 = 1 << 0,
+ RVV_REQ_FullMultiply = 1 << 1,
+
+ LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_FullMultiply)
+};
+
+// Raw RVV intrinsic info, used to expand later.
+// This struct is highly compact for minimized code size.
+struct RVVIntrinsicRecord {
+ // Intrinsic name, e.g. vadd_vv
+ const char *Name;
+
+ // Overloaded intrinsic name, could be empty if it can be computed from Name.
+ // e.g. vadd
+ const char *OverloadedName;
+
+ // Prototype for this intrinsic, index of RVVSignatureTable.
+ uint16_t PrototypeIndex;
+
+ // Suffix of intrinsic name, index of RVVSignatureTable.
+ uint16_t SuffixIndex;
+
+ // Suffix of overloaded intrinsic name, index of RVVSignatureTable.
+ uint16_t OverloadedSuffixIndex;
+
+ // Length of the prototype.
+ uint8_t PrototypeLength;
+
+ // Length of intrinsic name suffix.
+ uint8_t SuffixLength;
+
+ // Length of overloaded intrinsic suffix.
+ uint8_t OverloadedSuffixSize;
+
+ // Required target features for this intrinsic.
+ uint8_t RequiredExtensions;
+
+ // Supported type, mask of BasicType.
+ uint8_t TypeRangeMask;
+
+ // Supported LMUL.
+ uint8_t Log2LMULMask;
+
+ // Number of fields, greater than 1 if it's segment load/store.
+ uint8_t NF;
+
+ bool HasMasked : 1;
+ bool HasVL : 1;
+ bool HasMaskedOffOperand : 1;
};
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const RVVIntrinsicRecord &RVVInstrRecord);
+
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
} // end namespace RISCV
} // end namespace clang
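
RequiredExtensions is a bitmask over RVVRequire. A sketch of how a consumer might filter records against the current target; the HasRV64/HasFullMultiply flags are assumptions and not part of this header:

#include "clang/Support/RISCVVIntrinsicUtils.h"

static bool meetsRequirements(const clang::RISCV::RVVIntrinsicRecord &R,
                              bool HasRV64, bool HasFullMultiply) {
  using namespace clang::RISCV;
  if ((R.RequiredExtensions & RVV_REQ_RV64) && !HasRV64)
    return false;
  if ((R.RequiredExtensions & RVV_REQ_FullMultiply) && !HasFullMultiply)
    return false;
  return true;
}
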
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
index 49e4a0c149f1..1a318da3acca 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
@@ -98,7 +98,7 @@ public:
OptionRequirement() : Opt(createRefactoringOption<OptionType>()) {}
ArrayRef<std::shared_ptr<RefactoringOption>>
- getRefactoringOptions() const final override {
+ getRefactoringOptions() const final {
return Opt;
}
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
index 86fcc6ad0a79..5cb051d53433 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
@@ -52,7 +52,7 @@ using RefactoringActionRules =
class SourceChangeRefactoringRule : public RefactoringActionRuleBase {
public:
void invoke(RefactoringResultConsumer &Consumer,
- RefactoringRuleContext &Context) final override {
+ RefactoringRuleContext &Context) final {
Expected<AtomicChanges> Changes = createSourceReplacements(Context);
if (!Changes)
Consumer.handleError(Changes.takeError());
@@ -74,7 +74,7 @@ private:
class FindSymbolOccurrencesRefactoringRule : public RefactoringActionRuleBase {
public:
void invoke(RefactoringResultConsumer &Consumer,
- RefactoringRuleContext &Context) final override {
+ RefactoringRuleContext &Context) final {
Expected<SymbolOccurrences> Occurrences = findSymbolOccurrences(Context);
if (!Occurrences)
Consumer.handleError(Occurrences.takeError());
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
index 1575a136b11c..75bd91d15016 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
@@ -24,7 +24,7 @@ template <typename T,
typename = std::enable_if_t<traits::IsValidOptionType<T>::value>>
class OptionalRefactoringOption : public RefactoringOption {
public:
- void passToVisitor(RefactoringOptionVisitor &Visitor) final override {
+ void passToVisitor(RefactoringOptionVisitor &Visitor) final {
Visitor.visit(*this, Value);
}
@@ -48,7 +48,7 @@ public:
const ValueType &getValue() const {
return *OptionalRefactoringOption<T>::Value;
}
- bool isRequired() const final override { return true; }
+ bool isRequired() const final { return true; }
};
} // end namespace tooling
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index f7e7b73d1218..0273e5068371 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTImporter.h"
-#include "clang/AST/ASTImporterSharedState.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTImporterSharedState.h"
#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -34,6 +34,7 @@
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -58,8 +59,8 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -3219,9 +3220,12 @@ Error ASTNodeImporter::ImportFunctionDeclBody(FunctionDecl *FromFD,
}
// Returns true if the given D has a DeclContext up to the TranslationUnitDecl
-// which is equal to the given DC.
+// which is equal to the given DC, or D is equal to DC.
static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) {
- const DeclContext *DCi = D->getDeclContext();
+ const DeclContext *DCi = dyn_cast<DeclContext>(D);
+ if (!DCi)
+ DCi = D->getDeclContext();
+ assert(DCi && "Declaration should have a context");
while (DCi != D->getTranslationUnitDecl()) {
if (DCi == DC)
return true;
@@ -3230,9 +3234,36 @@ static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) {
return false;
}
+// Returns true if the statement S has a parent declaration whose DeclContext
+// is inside (or equal to) DC. In particular, if DC is a FunctionDecl, this
+// checks whether statement S resides in the body of that function.
+static bool isAncestorDeclContextOf(const DeclContext *DC, const Stmt *S) {
+ ParentMapContext &ParentC = DC->getParentASTContext().getParentMapContext();
+ DynTypedNodeList Parents = ParentC.getParents(*S);
+ while (!Parents.empty()) {
+ if (const Decl *PD = Parents.begin()->get<Decl>())
+ return isAncestorDeclContextOf(DC, PD);
+ Parents = ParentC.getParents(*Parents.begin());
+ }
+ return false;
+}
+
static bool hasTypeDeclaredInsideFunction(QualType T, const FunctionDecl *FD) {
if (T.isNull())
return false;
+
+ auto CheckTemplateArgument = [FD](const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Type:
+ return hasTypeDeclaredInsideFunction(Arg.getAsType(), FD);
+ case TemplateArgument::Expression:
+ return isAncestorDeclContextOf(FD, Arg.getAsExpr());
+ default:
+ // FIXME: Handle other argument kinds.
+ return false;
+ }
+ };
+
if (const auto *RecordT = T->getAs<RecordType>()) {
const RecordDecl *RD = RecordT->getDecl();
assert(RD);
@@ -3241,12 +3272,15 @@ static bool hasTypeDeclaredInsideFunction(QualType T, const FunctionDecl *FD) {
return true;
}
if (const auto *RDTempl = dyn_cast<ClassTemplateSpecializationDecl>(RD))
- return llvm::count_if(RDTempl->getTemplateArgs().asArray(),
- [FD](const TemplateArgument &Arg) {
- return hasTypeDeclaredInsideFunction(
- Arg.getAsType(), FD);
- });
+ if (llvm::count_if(RDTempl->getTemplateArgs().asArray(),
+ CheckTemplateArgument))
+ return true;
+      // Note: T may be obtained both as a RecordType and as a
+      // TemplateSpecializationType.
}
+ if (const auto *TST = T->getAs<TemplateSpecializationType>())
+ return llvm::count_if(TST->template_arguments(), CheckTemplateArgument);
+
return false;
}
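
A source-level sketch of what hasTypeDeclaredInsideFunction is probing for when importing a function body; the new CheckTemplateArgument lambda additionally covers expression arguments, which is not shown here:

template <typename T> struct Wrap {};

void f() {
  struct Local {};   // declared inside f()
  Wrap<Local> W;     // the specialization's type argument is declared inside
                     // the function being imported
  (void)W;
}
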
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 8580cc639d2d..2f3fd34d511c 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -394,7 +394,6 @@ void LinkageComputer::mergeTemplateLV(
shouldConsiderTemplateVisibility(fn, specInfo);
FunctionTemplateDecl *temp = specInfo->getTemplate();
-
// Merge information from the template declaration.
LinkageInfo tempLV = getLVForDecl(temp, computation);
// The linkage of the specialization should be consistent with the
@@ -468,11 +467,16 @@ void LinkageComputer::mergeTemplateLV(
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
-
ClassTemplateDecl *temp = spec->getSpecializedTemplate();
- LinkageInfo tempLV =
+ // Merge information from the template declaration.
+ LinkageInfo tempLV = getLVForDecl(temp, computation);
+ // The linkage of the specialization should be consistent with the
+ // template declaration.
+ LV.setLinkage(tempLV.getLinkage());
+
+ LinkageInfo paramsLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
- LV.mergeMaybeWithVisibility(tempLV,
+ LV.mergeMaybeWithVisibility(paramsLV,
considerVisibility && !hasExplicitVisibilityAlready(computation));
// Merge information from the template arguments. We ignore
@@ -520,7 +524,6 @@ void LinkageComputer::mergeTemplateLV(LinkageInfo &LV,
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
-
VarTemplateDecl *temp = spec->getSpecializedTemplate();
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
@@ -1077,7 +1080,6 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// Finally, merge in information from the class.
LV.mergeMaybeWithVisibility(classLV, considerClassVisibility);
-
return LV;
}
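
The added comment states the intent: a class template specialization's linkage should follow its template, as was already done for function templates. In source terms, roughly (illustrative only):

namespace {                                  // internal linkage
template <typename T> struct Hidden {};
}

Hidden<int> H;   // Hidden<int> is now expected to report the same (internal)
                 // linkage as the template it specializes
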
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index cd14ff4fb970..01d80dd49e10 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -5269,10 +5269,14 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
}
}
bool Cond;
- if (IS->isConsteval())
+ if (IS->isConsteval()) {
Cond = IS->isNonNegatedConsteval();
- else if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(),
- Cond))
+      // If we are not in a constant context, 'if consteval' should not
+      // evaluate to true.
+ if (!Info.InConstantContext)
+ Cond = !Cond;
+ } else if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(),
+ Cond))
return ESR_Failed;
if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
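
The fix concerns C++23 'if consteval' when the evaluator is not in a constant context: the consteval branch must then be treated as not taken, matching runtime semantics. For example:

constexpr int pick() {       // C++23 (-std=c++2b at the time of this merge)
  if consteval {
    return 1;                // taken only during constant evaluation
  } else {
    return 2;                // runtime (non-constant-context) evaluation
  }
}

constexpr int AtCompileTime = pick();   // 1
int atRunTime() { return pick(); }      // expected to yield 2
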
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index 614d94ae31a6..84178ff488a5 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -1659,9 +1659,13 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
appendInitializer(Block, I);
if (Init) {
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+    // initializer that is used for each element.
+ const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init);
+
findConstructionContexts(
ConstructionContextLayer::create(cfg->getBumpVectorContext(), I),
- Init);
+ AILE ? AILE->getSubExpr() : Init);
if (HasTemporaries) {
// For expression with temporaries go directly to subexpression to omit
@@ -2928,12 +2932,30 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
}
}
+ // If we bind to a tuple-like type, we iterate over the HoldingVars, and
+ // create a DeclStmt for each of them.
+ if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) {
+ for (auto BD : llvm::reverse(DD->bindings())) {
+ if (auto *VD = BD->getHoldingVar()) {
+ DeclGroupRef DG(VD);
+ DeclStmt *DSNew =
+ new (Context) DeclStmt(DG, VD->getLocation(), GetEndLoc(VD));
+ cfg->addSyntheticDeclStmt(DSNew, DS);
+ Block = VisitDeclSubExpr(DSNew);
+ }
+ }
+ }
+
autoCreateBlock();
appendStmt(Block, DS);
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+  // initializer that is used for each element.
+ const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init);
+
findConstructionContexts(
ConstructionContextLayer::create(cfg->getBumpVectorContext(), DS),
- Init);
+ AILE ? AILE->getSubExpr() : Init);
// Keep track of the last non-null block, as 'Block' can be nulled out
// if the initializer expression is something like a 'while' in a
@@ -3340,9 +3362,20 @@ CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+
+ unsigned Idx = 0;
for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
- et = E->capture_init_end(); it != et; ++it) {
+ et = E->capture_init_end();
+ it != et; ++it, ++Idx) {
if (Expr *Init = *it) {
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+      // initializer that is used for each element.
+ const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init);
+
+ findConstructionContexts(ConstructionContextLayer::create(
+ cfg->getBumpVectorContext(), {E, Idx}),
+ AILE ? AILE->getSubExpr() : Init);
+
CFGBlock *Tmp = Visit(Init);
if (Tmp)
LastBlock = Tmp;
@@ -5616,6 +5649,12 @@ static void print_construction_context(raw_ostream &OS,
Stmts.push_back(TOCC->getConstructorAfterElision());
break;
}
+ case ConstructionContext::LambdaCaptureKind: {
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+ Helper.handledStmt(const_cast<LambdaExpr *>(LCC->getLambdaExpr()), OS);
+ OS << "+" << LCC->getIndex();
+ return;
+ }
case ConstructionContext::ArgumentKind: {
const auto *ACC = cast<ArgumentConstructionContext>(CC);
if (const Stmt *BTE = ACC->getCXXBindTemporaryExpr()) {
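
Both CFG changes deal with implicit per-element initialization: an ArrayInitLoopExpr appears when an array is copied element by element (for example in a by-value lambda capture), and tuple-like structured bindings introduce hidden holding variables that now get synthetic DeclStmts. A source sketch:

#include <tuple>

void g(std::tuple<int, int> T) {
  int Arr[3] = {1, 2, 3};
  auto ByValue = [Arr] { return Arr[0]; };  // capturing Arr by value copies it
                                            // element by element through an
                                            // ArrayInitLoopExpr
  auto [A, B] = T;                          // tuple-like binding: each name has
                                            // a holding variable, for which a
                                            // synthetic DeclStmt is now built
  (void)ByValue; (void)A; (void)B;
}
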
diff --git a/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp b/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
index 6ba1e2173d2c..8a862c06f13a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
@@ -156,6 +156,12 @@ const ConstructionContext *ConstructionContext::createBoundTemporaryFromLayers(
return create<CXX17ElidedCopyConstructorInitializerConstructionContext>(
C, I, BTE);
}
+ case ConstructionContextItem::LambdaCaptureKind: {
+ assert(ParentLayer->isLast());
+ const auto *E = cast<LambdaExpr>(ParentItem.getStmt());
+ return create<LambdaCaptureConstructionContext>(C, E,
+ ParentItem.getIndex());
+ }
} // switch (ParentItem.getKind())
llvm_unreachable("Unexpected construction context with destructor!");
@@ -200,6 +206,11 @@ const ConstructionContext *ConstructionContext::createFromLayers(
case ConstructionContextItem::ElidableConstructorKind: {
llvm_unreachable("The argument needs to be materialized first!");
}
+ case ConstructionContextItem::LambdaCaptureKind: {
+ assert(TopLayer->isLast());
+ const auto *E = cast<LambdaExpr>(TopItem.getStmt());
+ return create<LambdaCaptureConstructionContext>(C, E, TopItem.getIndex());
+ }
case ConstructionContextItem::InitializerKind: {
assert(TopLayer->isLast());
const CXXCtorInitializer *I = TopItem.getCXXCtorInitializer();
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index e3bb902b1fe9..c876eaa6358a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -455,14 +455,16 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
// array is considered modified if the loop-variable is a non-const reference.
const auto DeclStmtToNonRefToArray = declStmt(hasSingleDecl(varDecl(hasType(
hasUnqualifiedDesugaredType(referenceType(pointee(arrayType())))))));
- const auto RefToArrayRefToElements = match(
- findAll(stmt(cxxForRangeStmt(
- hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
- .bind(NodeID<Decl>::value)),
- hasRangeStmt(DeclStmtToNonRefToArray),
- hasRangeInit(canResolveToExpr(equalsNode(Exp)))))
- .bind("stmt")),
- Stm, Context);
+ const auto RefToArrayRefToElements =
+ match(findAll(stmt(cxxForRangeStmt(
+ hasLoopVariable(
+ varDecl(anyOf(hasType(nonConstReferenceType()),
+ hasType(nonConstPointerType())))
+ .bind(NodeID<Decl>::value)),
+ hasRangeStmt(DeclStmtToNonRefToArray),
+ hasRangeInit(canResolveToExpr(equalsNode(Exp)))))
+ .bind("stmt")),
+ Stm, Context);
if (const auto *BadRangeInitFromArray =
selectFirst<Stmt>("stmt", RefToArrayRefToElements))
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
index 5105999741e6..216f41bdee1c 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -113,16 +113,27 @@ BoolValue &DataflowAnalysisContext::getOrCreateNegation(BoolValue &Val) {
BoolValue &DataflowAnalysisContext::getOrCreateImplication(BoolValue &LHS,
BoolValue &RHS) {
- return &LHS == &RHS ? getBoolLiteralValue(true)
- : getOrCreateDisjunction(getOrCreateNegation(LHS), RHS);
+ if (&LHS == &RHS)
+ return getBoolLiteralValue(true);
+
+ auto Res = ImplicationVals.try_emplace(std::make_pair(&LHS, &RHS), nullptr);
+ if (Res.second)
+ Res.first->second =
+ &takeOwnership(std::make_unique<ImplicationValue>(LHS, RHS));
+ return *Res.first->second;
}
BoolValue &DataflowAnalysisContext::getOrCreateIff(BoolValue &LHS,
BoolValue &RHS) {
- return &LHS == &RHS
- ? getBoolLiteralValue(true)
- : getOrCreateConjunction(getOrCreateImplication(LHS, RHS),
- getOrCreateImplication(RHS, LHS));
+ if (&LHS == &RHS)
+ return getBoolLiteralValue(true);
+
+ auto Res = BiconditionalVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
+ nullptr);
+ if (Res.second)
+ Res.first->second =
+ &takeOwnership(std::make_unique<BiconditionalValue>(LHS, RHS));
+ return *Res.first->second;
}
AtomicBoolValue &DataflowAnalysisContext::makeFlowConditionToken() {
@@ -199,18 +210,18 @@ void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
if (!Res.second)
return;
- auto ConstraintsIT = FlowConditionConstraints.find(&Token);
- if (ConstraintsIT == FlowConditionConstraints.end()) {
+ auto ConstraintsIt = FlowConditionConstraints.find(&Token);
+ if (ConstraintsIt == FlowConditionConstraints.end()) {
Constraints.insert(&Token);
} else {
// Bind flow condition token via `iff` to its set of constraints:
// FC <=> (C1 ^ C2 ^ ...), where Ci are constraints
- Constraints.insert(&getOrCreateIff(Token, *ConstraintsIT->second));
+ Constraints.insert(&getOrCreateIff(Token, *ConstraintsIt->second));
}
- auto DepsIT = FlowConditionDeps.find(&Token);
- if (DepsIT != FlowConditionDeps.end()) {
- for (AtomicBoolValue *DepToken : DepsIT->second) {
+ auto DepsIt = FlowConditionDeps.find(&Token);
+ if (DepsIt != FlowConditionDeps.end()) {
+ for (AtomicBoolValue *DepToken : DepsIt->second) {
addTransitiveFlowConditionConstraints(*DepToken, Constraints,
VisitedTokens);
}
@@ -220,10 +231,10 @@ void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
BoolValue &DataflowAnalysisContext::substituteBoolValue(
BoolValue &Val,
llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
- auto IT = SubstitutionsCache.find(&Val);
- if (IT != SubstitutionsCache.end()) {
+ auto It = SubstitutionsCache.find(&Val);
+ if (It != SubstitutionsCache.end()) {
// Return memoized result of substituting this boolean value.
- return *IT->second;
+ return *It->second;
}
// Handle substitution on the boolean value (and its subvalues), saving the
@@ -258,6 +269,24 @@ BoolValue &DataflowAnalysisContext::substituteBoolValue(
Result = &getOrCreateConjunction(LeftSub, RightSub);
break;
}
+ case Value::Kind::Implication: {
+ auto &IV = *cast<ImplicationValue>(&Val);
+ auto &LeftSub =
+ substituteBoolValue(IV.getLeftSubValue(), SubstitutionsCache);
+ auto &RightSub =
+ substituteBoolValue(IV.getRightSubValue(), SubstitutionsCache);
+ Result = &getOrCreateImplication(LeftSub, RightSub);
+ break;
+ }
+ case Value::Kind::Biconditional: {
+ auto &BV = *cast<BiconditionalValue>(&Val);
+ auto &LeftSub =
+ substituteBoolValue(BV.getLeftSubValue(), SubstitutionsCache);
+ auto &RightSub =
+ substituteBoolValue(BV.getRightSubValue(), SubstitutionsCache);
+ Result = &getOrCreateIff(LeftSub, RightSub);
+ break;
+ }
default:
llvm_unreachable("Unhandled Value Kind");
}
@@ -280,19 +309,19 @@ BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowCondition(
BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowConditionWithCache(
AtomicBoolValue &Token,
llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
- auto ConstraintsIT = FlowConditionConstraints.find(&Token);
- if (ConstraintsIT == FlowConditionConstraints.end()) {
+ auto ConstraintsIt = FlowConditionConstraints.find(&Token);
+ if (ConstraintsIt == FlowConditionConstraints.end()) {
return getBoolLiteralValue(true);
}
- auto DepsIT = FlowConditionDeps.find(&Token);
- if (DepsIT != FlowConditionDeps.end()) {
- for (AtomicBoolValue *DepToken : DepsIT->second) {
+ auto DepsIt = FlowConditionDeps.find(&Token);
+ if (DepsIt != FlowConditionDeps.end()) {
+ for (AtomicBoolValue *DepToken : DepsIt->second) {
auto &NewDep = buildAndSubstituteFlowConditionWithCache(
*DepToken, SubstitutionsCache);
SubstitutionsCache[DepToken] = &NewDep;
}
}
- return substituteBoolValue(*ConstraintsIT->second, SubstitutionsCache);
+ return substituteBoolValue(*ConstraintsIt->second, SubstitutionsCache);
}
void DataflowAnalysisContext::dumpFlowCondition(AtomicBoolValue &Token) {
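The caching added above follows a get-or-create interning pattern. A minimal standalone sketch of that pattern, using std::map and std::unique_ptr in place of llvm::DenseMap and takeOwnership (class and member names below are illustrative, not the clang API):

    #include <map>
    #include <memory>
    #include <utility>

    struct BoolValue { virtual ~BoolValue() = default; };

    struct ImplicationValue : BoolValue {
      ImplicationValue(BoolValue &L, BoolValue &R) : LHS(L), RHS(R) {}
      BoolValue &LHS, &RHS;
    };

    class InterningContext {
      std::map<std::pair<BoolValue *, BoolValue *>,
               std::unique_ptr<ImplicationValue>>
          ImplicationVals;

    public:
      // Returns the unique implication node for (LHS, RHS); it is created on
      // the first request and shared by every later one.
      ImplicationValue &getOrCreateImplication(BoolValue &LHS, BoolValue &RHS) {
        auto Res = ImplicationVals.try_emplace({&LHS, &RHS}, nullptr);
        if (Res.second)
          Res.first->second = std::make_unique<ImplicationValue>(LHS, RHS);
        return *Res.first->second;
      }
    };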
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index 2b6cd0c4f857..f6f71e34b892 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -200,6 +200,42 @@ Environment::Environment(DataflowAnalysisContext &DACtx,
}
}
+Environment Environment::pushCall(const CallExpr *Call) const {
+ Environment Env(*this);
+
+ // FIXME: Currently this only works if the callee is never a method and the
+ // same callee is never analyzed from multiple separate callsites. To
+ // generalize this, we'll need to store a "context" field (probably a stack of
+ // `const CallExpr *`s) in the `Environment`, and then change the
+ // `DataflowAnalysisContext` class to hold a map from contexts to "frames",
+ // where each frame stores its own version of what are currently the
+ // `DeclToLoc`, `ExprToLoc`, and `ThisPointeeLoc` fields.
+
+ const auto *FuncDecl = Call->getDirectCallee();
+ assert(FuncDecl != nullptr);
+ assert(FuncDecl->getBody() != nullptr);
+ // FIXME: In order to allow the callee to reference globals, we probably need
+ // to call `initGlobalVars` here in some way.
+
+ auto ParamIt = FuncDecl->param_begin();
+ auto ArgIt = Call->arg_begin();
+ auto ArgEnd = Call->arg_end();
+
+ // FIXME: Parameters don't always map to arguments 1:1; examples include
+ // overloaded operators implemented as member functions, and parameter packs.
+ for (; ArgIt != ArgEnd; ++ParamIt, ++ArgIt) {
+ assert(ParamIt != FuncDecl->param_end());
+
+ const VarDecl *Param = *ParamIt;
+ const Expr *Arg = *ArgIt;
+ auto *ArgLoc = Env.getStorageLocation(*Arg, SkipPast::Reference);
+ assert(ArgLoc != nullptr);
+ Env.setStorageLocation(*Param, *ArgLoc);
+ }
+
+ return Env;
+}
+
bool Environment::equivalentTo(const Environment &Other,
Environment::ValueModel &Model) const {
assert(DACtx == Other.DACtx);
@@ -352,16 +388,16 @@ void Environment::setValue(const StorageLocation &Loc, Value &Val) {
}
}
- auto IT = MemberLocToStruct.find(&Loc);
- if (IT != MemberLocToStruct.end()) {
+ auto It = MemberLocToStruct.find(&Loc);
+ if (It != MemberLocToStruct.end()) {
// `Loc` is the location of a struct member so we need to also update the
// value of the member in the corresponding `StructValue`.
- assert(IT->second.first != nullptr);
- StructValue &StructVal = *IT->second.first;
+ assert(It->second.first != nullptr);
+ StructValue &StructVal = *It->second.first;
- assert(IT->second.second != nullptr);
- const ValueDecl &Member = *IT->second.second;
+ assert(It->second.second != nullptr);
+ const ValueDecl &Member = *It->second.second;
StructVal.setChild(Member, Val);
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
index 309ff0682f50..714ad08643ed 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
@@ -96,6 +96,20 @@ public:
S = formatv("(not\n{0})", debugString(N.getSubVal(), Depth + 1));
break;
}
+ case Value::Kind::Implication: {
+ auto &IV = cast<ImplicationValue>(B);
+ auto L = debugString(IV.getLeftSubValue(), Depth + 1);
+ auto R = debugString(IV.getRightSubValue(), Depth + 1);
+ S = formatv("(=>\n{0}\n{1})", L, R);
+ break;
+ }
+ case Value::Kind::Biconditional: {
+ auto &BV = cast<BiconditionalValue>(B);
+ auto L = debugString(BV.getLeftSubValue(), Depth + 1);
+ auto R = debugString(BV.getRightSubValue(), Depth + 1);
+ S = formatv("(=\n{0}\n{1})", L, R);
+ break;
+ }
default:
llvm_unreachable("Unhandled value kind");
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 500e1a7a9390..bbf7526adce9 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -20,7 +20,9 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/OperatorKinds.h"
@@ -46,8 +48,9 @@ static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
public:
- TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env)
- : StmtToEnv(StmtToEnv), Env(Env) {}
+ TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env,
+ TransferOptions Options)
+ : StmtToEnv(StmtToEnv), Env(Env), Options(Options) {}
void VisitBinaryOperator(const BinaryOperator *S) {
const Expr *LHS = S->getLHS();
@@ -503,6 +506,35 @@ public:
if (ArgLoc == nullptr)
return;
Env.setStorageLocation(*S, *ArgLoc);
+ } else if (const FunctionDecl *F = S->getDirectCallee()) {
+ // This case is for context-sensitive analysis, which we only do if we
+ // have the callee body available in the translation unit.
+ if (!Options.ContextSensitive || F->getBody() == nullptr)
+ return;
+
+ auto &ASTCtx = F->getASTContext();
+
+ // FIXME: Cache these CFGs.
+ auto CFCtx = ControlFlowContext::build(F, F->getBody(), &ASTCtx);
+ // FIXME: Handle errors here and below.
+ assert(CFCtx);
+ auto ExitBlock = CFCtx->getCFG().getExit().getBlockID();
+
+ auto CalleeEnv = Env.pushCall(S);
+
+ // FIXME: Use the same analysis as the caller for the callee.
+ DataflowAnalysisOptions Options;
+ auto Analysis = NoopAnalysis(ASTCtx, Options);
+
+ auto BlockToOutputState =
+ dataflow::runDataflowAnalysis(*CFCtx, Analysis, CalleeEnv);
+ assert(BlockToOutputState);
+ assert(ExitBlock < BlockToOutputState->size());
+
+ auto ExitState = (*BlockToOutputState)[ExitBlock];
+ assert(ExitState);
+
+ Env = ExitState->Env;
}
}
@@ -564,11 +596,11 @@ public:
Env.setValue(Loc, *Val);
if (Type->isStructureOrClassType()) {
- for (auto IT : llvm::zip(Type->getAsRecordDecl()->fields(), S->inits())) {
- const FieldDecl *Field = std::get<0>(IT);
+ for (auto It : llvm::zip(Type->getAsRecordDecl()->fields(), S->inits())) {
+ const FieldDecl *Field = std::get<0>(It);
assert(Field != nullptr);
- const Expr *Init = std::get<1>(IT);
+ const Expr *Init = std::get<1>(It);
assert(Init != nullptr);
if (Value *InitVal = Env.getValue(*Init, SkipPast::None))
@@ -633,10 +665,12 @@ private:
const StmtToEnvMap &StmtToEnv;
Environment &Env;
+ TransferOptions Options;
};
-void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env) {
- TransferVisitor(StmtToEnv, Env).Visit(&S);
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env,
+ TransferOptions Options) {
+ TransferVisitor(StmtToEnv, Env, Options).Visit(&S);
}
} // namespace dataflow
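A hedged sketch of the source situation the new branch targets (hypothetical code): when the callee body is in the same translation unit and Options.ContextSensitive is set, the callee's CFG is analyzed in an environment seeded by pushCall, and the environment at its exit block replaces the caller's environment instead of the call being treated as opaque.

    // Hypothetical caller/callee pair for the context-sensitive path above.
    bool identity(bool B) { return B; }

    void caller() {
      bool Result = identity(true);
      // The environment after this call is taken from identity()'s exit block;
      // the argument-to-parameter mapping set up by pushCall is what lets the
      // analysis relate Result back to the literal true.
      (void)Result;
    }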
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index 6ce9dd55914d..fbb521763ee6 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -47,9 +47,9 @@ public:
: CFCtx(CFCtx), BlockToState(BlockToState) {}
const Environment *getEnvironment(const Stmt &S) const override {
- auto BlockIT = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
- assert(BlockIT != CFCtx.getStmtToBlock().end());
- const auto &State = BlockToState[BlockIT->getSecond()->getBlockID()];
+ auto BlockIt = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
+ assert(BlockIt != CFCtx.getStmtToBlock().end());
+ const auto &State = BlockToState[BlockIt->getSecond()->getBlockID()];
assert(State);
return &State.value().Env;
}
@@ -74,8 +74,9 @@ static int blockIndexInPredecessor(const CFGBlock &Pred,
class TerminatorVisitor : public ConstStmtVisitor<TerminatorVisitor> {
public:
TerminatorVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env,
- int BlockSuccIdx)
- : StmtToEnv(StmtToEnv), Env(Env), BlockSuccIdx(BlockSuccIdx) {}
+ int BlockSuccIdx, TransferOptions TransferOpts)
+ : StmtToEnv(StmtToEnv), Env(Env), BlockSuccIdx(BlockSuccIdx),
+ TransferOpts(TransferOpts) {}
void VisitIfStmt(const IfStmt *S) {
auto *Cond = S->getCond();
@@ -118,7 +119,7 @@ private:
void extendFlowCondition(const Expr &Cond) {
// The terminator sub-expression might not be evaluated.
if (Env.getStorageLocation(Cond, SkipPast::None) == nullptr)
- transfer(StmtToEnv, Cond, Env);
+ transfer(StmtToEnv, Cond, Env, TransferOpts);
// FIXME: The flow condition must be an r-value, so `SkipPast::None` should
// suffice.
@@ -150,6 +151,7 @@ private:
const StmtToEnvMap &StmtToEnv;
Environment &Env;
int BlockSuccIdx;
+ TransferOptions TransferOpts;
};
/// Computes the input state for a given basic block by joining the output
@@ -217,7 +219,8 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
if (const Stmt *PredTerminatorStmt = Pred->getTerminatorStmt()) {
const StmtToEnvMapImpl StmtToEnv(CFCtx, BlockStates);
TerminatorVisitor(StmtToEnv, PredState.Env,
- blockIndexInPredecessor(*Pred, Block))
+ blockIndexInPredecessor(*Pred, Block),
+ Analysis.builtinTransferOptions())
.Visit(PredTerminatorStmt);
}
}
@@ -253,7 +256,8 @@ static void transferCFGStmt(
assert(S != nullptr);
if (Analysis.applyBuiltinTransfer())
- transfer(StmtToEnvMapImpl(CFCtx, BlockStates), *S, State.Env);
+ transfer(StmtToEnvMapImpl(CFCtx, BlockStates), *S, State.Env,
+ Analysis.builtinTransferOptions());
Analysis.transferTypeErased(S, State.Lattice, State.Env);
if (HandleTransferredStmt != nullptr)
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
index 6a3948bd1fea..cd1fd708a43a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -221,6 +221,18 @@ BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
UnprocessedSubVals.push(&N->getSubVal());
break;
}
+ case Value::Kind::Implication: {
+ auto *I = cast<ImplicationValue>(Val);
+ UnprocessedSubVals.push(&I->getLeftSubValue());
+ UnprocessedSubVals.push(&I->getRightSubValue());
+ break;
+ }
+ case Value::Kind::Biconditional: {
+ auto *B = cast<BiconditionalValue>(Val);
+ UnprocessedSubVals.push(&B->getLeftSubValue());
+ UnprocessedSubVals.push(&B->getRightSubValue());
+ break;
+ }
case Value::Kind::AtomicBool: {
Atomics[Var] = cast<AtomicBoolValue>(Val);
break;
@@ -263,30 +275,52 @@ BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
const Variable LeftSubVar = GetVar(&C->getLeftSubValue());
const Variable RightSubVar = GetVar(&C->getRightSubValue());
- // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v !B)`
- // which is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar));
- Formula.addClause(negLit(Var), posLit(RightSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
+ if (LeftSubVar == RightSubVar) {
+ // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar));
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&C->getLeftSubValue());
- UnprocessedSubVals.push(&C->getRightSubValue());
+ // Visit a sub-value of `Val` (pick any, they are identical).
+ UnprocessedSubVals.push(&C->getLeftSubValue());
+ } else {
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v !B)`
+ // which is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar));
+ Formula.addClause(negLit(Var), posLit(RightSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&C->getLeftSubValue());
+ UnprocessedSubVals.push(&C->getRightSubValue());
+ }
} else if (auto *D = dyn_cast<DisjunctionValue>(Val)) {
const Variable LeftSubVar = GetVar(&D->getLeftSubValue());
const Variable RightSubVar = GetVar(&D->getRightSubValue());
- // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v !B)`
- // which is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- Formula.addClause(negLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
- Formula.addClause(posLit(Var), negLit(LeftSubVar));
- Formula.addClause(posLit(Var), negLit(RightSubVar));
+ if (LeftSubVar == RightSubVar) {
+ // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar));
- // Visit the sub-values of `Val`.
- UnprocessedSubVals.push(&D->getLeftSubValue());
- UnprocessedSubVals.push(&D->getRightSubValue());
+ // Visit a sub-value of `Val` (pick any, they are identical).
+ UnprocessedSubVals.push(&D->getLeftSubValue());
+ } else {
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v !B)`
+ // which is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar));
+ Formula.addClause(posLit(Var), negLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&D->getLeftSubValue());
+ UnprocessedSubVals.push(&D->getRightSubValue());
+ }
} else if (auto *N = dyn_cast<NegationValue>(Val)) {
const Variable SubVar = GetVar(&N->getSubVal());
@@ -298,6 +332,46 @@ BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
// Visit the sub-values of `Val`.
UnprocessedSubVals.push(&N->getSubVal());
+ } else if (auto *I = dyn_cast<ImplicationValue>(Val)) {
+ const Variable LeftSubVar = GetVar(&I->getLeftSubValue());
+ const Variable RightSubVar = GetVar(&I->getRightSubValue());
+
+ // `X <=> (A => B)` is equivalent to
+ // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ Formula.addClause(posLit(Var), posLit(LeftSubVar));
+ Formula.addClause(posLit(Var), negLit(RightSubVar));
+ Formula.addClause(negLit(Var), negLit(LeftSubVar), posLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&I->getLeftSubValue());
+ UnprocessedSubVals.push(&I->getRightSubValue());
+ } else if (auto *B = dyn_cast<BiconditionalValue>(Val)) {
+ const Variable LeftSubVar = GetVar(&B->getLeftSubValue());
+ const Variable RightSubVar = GetVar(&B->getRightSubValue());
+
+ if (LeftSubVar == RightSubVar) {
+      // `X <=> (A <=> A)` is equivalent to `X` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ Formula.addClause(posLit(Var));
+
+ // No need to visit the sub-values of `Val`.
+ } else {
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which is
+ // already in conjunctive normal form. Below we add each of the conjuncts
+ // of the latter expression to the result.
+ Formula.addClause(posLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
+ Formula.addClause(negLit(Var), posLit(LeftSubVar), negLit(RightSubVar));
+ Formula.addClause(negLit(Var), negLit(LeftSubVar), posLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&B->getLeftSubValue());
+ UnprocessedSubVals.push(&B->getRightSubValue());
+ }
}
}
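The clause sets above are Tseitin-style encodings. A standalone truth-table check (a sketch, not part of the solver) confirming that the clauses added for `X <=> (A => B)` and `X <=> (A <=> B)` match their defining formulas:

    #include <cassert>

    int main() {
      for (int x = 0; x < 2; ++x)
        for (int a = 0; a < 2; ++a)
          for (int b = 0; b < 2; ++b) {
            bool X = x, A = a, B = b;

            // X <=> (A => B) versus (X v A) ^ (X v !B) ^ (!X v !A v B).
            bool ImpDef = (X == (!A || B));
            bool ImpCNF = (X || A) && (X || !B) && (!X || !A || B);
            assert(ImpDef == ImpCNF);

            // X <=> (A <=> B) versus the four biconditional clauses.
            bool IffDef = (X == (A == B));
            bool IffCNF = (X || A || B) && (X || !A || !B) &&
                          (!X || A || !B) && (!X || !A || B);
            assert(IffDef == IffCNF);
          }
      return 0;
    }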
diff --git a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
index 6c601c290c92..ff7f3ebe28f8 100644
--- a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
@@ -72,6 +72,11 @@ bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
bool alive = false;
for (const BindingDecl *BD : DD->bindings())
alive |= liveBindings.contains(BD);
+
+    // Note: the only known case where this condition is necessary is when a
+    // binding to a tuple-like structure is created. The HoldingVar
+    // initializers have a DeclRefExpr to the DecompositionDecl.
+ alive |= liveDecls.contains(DD);
return alive;
}
return liveDecls.contains(D);
@@ -343,8 +348,12 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
if (const BindingDecl* BD = dyn_cast<BindingDecl>(D)) {
Killed = !BD->getType()->isReferenceType();
- if (Killed)
+ if (Killed) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ }
} else if (const auto *VD = dyn_cast<VarDecl>(D)) {
Killed = writeShouldKill(VD);
if (Killed)
@@ -371,8 +380,12 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
const Decl* D = DR->getDecl();
bool InAssignment = LV.inAssignment[DR];
if (const auto *BD = dyn_cast<BindingDecl>(D)) {
- if (!InAssignment)
+ if (!InAssignment) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.add(val.liveBindings, BD);
+ }
} else if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (!InAssignment && !isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
@@ -382,8 +395,16 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
for (const auto *DI : DS->decls()) {
if (const auto *DD = dyn_cast<DecompositionDecl>(DI)) {
- for (const auto *BD : DD->bindings())
+ for (const auto *BD : DD->bindings()) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ }
+
+    // When a binding to a tuple-like structure is created, the HoldingVar
+ // initializers have a DeclRefExpr to the DecompositionDecl.
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, DD);
} else if (const auto *VD = dyn_cast<VarDecl>(DI)) {
if (!isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
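For context, a hedged example of the source pattern behind the HoldingVar handling (hypothetical code): a structured binding to a tuple-like type introduces a hidden holding variable per binding, and those holding variables are what the transfer functions above now add to and remove from the live set.

    #include <tuple>

    std::tuple<int, int> make();

    int use() {
      // Binding to a tuple-like type: the DecompositionDecl introduces hidden
      // holding variables for A and B, and their initializers refer back to
      // the DecompositionDecl itself.
      auto [A, B] = make();
      return A + B;
    }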
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
index 7e932e7c86b1..6edd035d9eb8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
@@ -71,7 +71,7 @@ public:
bool isValidCPUName(StringRef Name) const override;
- virtual unsigned getMinGlobalAlign(uint64_t) const override;
+ unsigned getMinGlobalAlign(uint64_t) const override;
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index 9120808e298d..ca01b44ae3a5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -849,6 +849,9 @@ void PPCTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
? &llvm::APFloat::IEEEquad()
: &llvm::APFloat::PPCDoubleDouble();
Opts.IEEE128 = 1;
+ if (getTriple().isOSAIX() && Opts.EnableAIXQuadwordAtomicsABI &&
+ HasQuadwordAtomics)
+ MaxAtomicInlineWidth = 128;
}
ArrayRef<Builtin::Info> PPCTargetInfo::getTargetBuiltins() const {
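A hedged illustration of what raising MaxAtomicInlineWidth to 128 enables (hypothetical C++ usage; assumes the quadword-atomics ABI is in effect and the CPU has quadword atomics): a 16-byte atomic operation can be emitted inline rather than as a call into the atomic support library.

    #include <atomic>

    struct Pair { long long Lo, Hi; }; // 16 bytes, trivially copyable

    std::atomic<Pair> Shared;

    bool publish(Pair Expected, Pair Desired) {
      // With a 128-bit inline atomic width this compare-exchange can be
      // lowered to an inline lock-free sequence; otherwise it falls back to
      // library calls.
      return Shared.compare_exchange_strong(Expected, Desired);
    }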
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index 3caf5256118e..224145f4b020 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -229,14 +229,14 @@ public:
bool validateInputSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint, unsigned Size) const override;
- virtual bool
+ bool
checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const override {
if (CPU == llvm::X86::CK_None || CPU >= llvm::X86::CK_PentiumPro)
return true;
return TargetInfo::checkCFProtectionReturnSupported(Diags);
};
- virtual bool
+ bool
checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const override {
if (CPU == llvm::X86::CK_None || CPU >= llvm::X86::CK_PentiumPro)
return true;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index fe6cc7a2b1c7..c1eb8a975796 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -127,7 +127,7 @@ namespace CodeGen {
public:
SwiftABIInfo(CodeGen::CodeGenTypes &cgt) : ABIInfo(cgt) {}
- bool supportsSwift() const final override { return true; }
+ bool supportsSwift() const final { return true; }
virtual bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> types,
bool asReturnValue) const = 0;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index a46f7f37141f..0768e6581acb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -41,6 +41,8 @@ struct CatchTypeInfo;
/// Implements C++ ABI-specific code generation functions.
class CGCXXABI {
+ friend class CodeGenModule;
+
protected:
CodeGenModule &CGM;
std::unique_ptr<MangleContext> MangleCtx;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index 104a30dd6b25..dfa78bf59c65 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -4473,17 +4473,22 @@ llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
// they are nested within.
SmallVector<llvm::OperandBundleDef, 1>
CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
// There is no need for a funclet operand bundle if we aren't inside a
// funclet.
if (!CurrentFuncletPad)
- return BundleList;
-
- // Skip intrinsics which cannot throw.
- auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
- if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
- return BundleList;
+ return (SmallVector<llvm::OperandBundleDef, 1>());
+
+ // Skip intrinsics which cannot throw (as long as they don't lower into
+ // regular function calls in the course of IR transformations).
+ if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
+ if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
+ auto IID = CalleeFn->getIntrinsicID();
+ if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
+ return (SmallVector<llvm::OperandBundleDef, 1>());
+ }
+ }
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
BundleList.emplace_back("funclet", CurrentFuncletPad);
return BundleList;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index cde31711a7db..c6696c4df775 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -2698,15 +2698,21 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
// Don't insert type test assumes if we are forcing public
// visibility.
!CGM.AlwaysHasLTOVisibilityPublic(RD)) {
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ QualType Ty = QualType(RD->getTypeForDecl(), 0);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty);
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
+ // If we already know that the call has hidden LTO visibility, emit
+ // @llvm.type.test(). Otherwise emit @llvm.public.type.test(), which WPD
+ // will convert to @llvm.type.test() if we assert at link time that we have
+ // whole program visibility.
+ llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
+ ? llvm::Intrinsic::type_test
+ : llvm::Intrinsic::public_type_test;
llvm::Value *TypeTest =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
- {CastedVTable, TypeId});
+ Builder.CreateCall(CGM.getIntrinsic(IID), {CastedVTable, TypeId});
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::assume), TypeTest);
}
}
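A hedged source-level illustration of the split above (hypothetical example; which intrinsic is emitted ultimately depends on the LTO visibility rules and flags in effect): types with hidden LTO visibility keep llvm.type.test, while others get llvm.public.type.test, which whole-program devirtualization may strengthen at link time.

    // Hypothetical example: [[clang::lto_visibility_public]] forces public
    // LTO visibility, so the call through P is a public.type.test candidate.
    struct [[clang::lto_visibility_public]] PublicIface {
      virtual void f();
    };

    struct LocalImpl { // hidden LTO visibility when the build guarantees it
      virtual void g();
    };

    void call(PublicIface *P, LocalImpl *L) {
      P->f(); // llvm.public.type.test
      L->g(); // llvm.type.test once hidden visibility is known
    }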
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 7e9e86763af9..94c48316add7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -491,9 +491,11 @@ StringRef CGDebugInfo::getCurrentDirname() {
if (!CWDName.empty())
return CWDName;
- SmallString<256> CWD;
- llvm::sys::fs::current_path(CWD);
- return CWDName = internString(CWD);
+ llvm::ErrorOr<std::string> CWD =
+ CGM.getFileSystem()->getCurrentWorkingDirectory();
+ if (!CWD)
+ return StringRef();
+ return CWDName = internString(*CWD);
}
void CGDebugInfo::CreateCompileUnit() {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index ec459f07f307..7bbe9af7ed59 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -2837,11 +2837,13 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
// Enter the continuation block and emit a phi if required.
CGF.EmitBlock(continueBB);
if (msgRet.isScalar()) {
- llvm::Value *v = msgRet.getScalarVal();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- phi->addIncoming(v, nonNilPathBB);
- phi->addIncoming(CGM.EmitNullConstant(ResultType), nilPathBB);
- msgRet = RValue::get(phi);
+ // If the return type is void, do nothing
+ if (llvm::Value *v = msgRet.getScalarVal()) {
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ phi->addIncoming(v, nonNilPathBB);
+ phi->addIncoming(CGM.EmitNullConstant(ResultType), nilPathBB);
+ msgRet = RValue::get(phi);
+ }
} else if (msgRet.isAggregate()) {
// Aggregate zeroing is handled in nilCleanupBB when it's required.
} else /* isComplex() */ {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index 1d30c5061743..ff585efa3fce 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -186,17 +186,16 @@ public:
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
- virtual void emitProcBindClause(CodeGenFunction &CGF,
- llvm::omp::ProcBindKind ProcBind,
- SourceLocation Loc) override;
+ void emitProcBindClause(CodeGenFunction &CGF,
+ llvm::omp::ProcBindKind ProcBind,
+ SourceLocation Loc) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
- virtual void emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) override;
+ void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
+ SourceLocation Loc) override;
/// This function ought to emit, in the general case, a call to
// the openmp runtime kmpc_push_num_teams. In NVPTX backend it is not needed
@@ -300,12 +299,12 @@ public:
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
- virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps,
- ReductionOptionsTy Options) override;
+ void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ ReductionOptionsTy Options) override;
/// Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index 4ffbecdf2741..12c6b3f49c43 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -146,6 +146,7 @@ namespace clang {
public:
BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
@@ -159,8 +160,8 @@ namespace clang {
AsmOutStream(std::move(OS)), Context(nullptr),
LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
- CodeGenOpts, C, CoverageInfo)),
+ Gen(CreateLLVMCodeGen(Diags, InFile, std::move(FS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
TimerIsEnabled = CodeGenOpts.TimePasses;
llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
@@ -171,6 +172,7 @@ namespace clang {
// to use the clang diagnostic handler for IR input files. It avoids
// initializing the OS field.
BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PPOpts,
const CodeGenOptions &CodeGenOpts,
@@ -183,8 +185,8 @@ namespace clang {
Context(nullptr),
LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, "", HeaderSearchOpts, PPOpts,
- CodeGenOpts, C, CoverageInfo)),
+ Gen(CreateLLVMCodeGen(Diags, "", std::move(FS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
TimerIsEnabled = CodeGenOpts.TimePasses;
llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
@@ -1052,10 +1054,10 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor());
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
- BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), std::string(InFile), std::move(LinkModules),
- std::move(OS), *VMContext, CoverageInfo));
+ BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
+ CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), CI.getLangOpts(), std::string(InFile),
+ std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
BEConsumer = Result.get();
// Enable generating macro debug info only when debug info is not disabled and
@@ -1185,9 +1187,10 @@ void CodeGenAction::ExecuteAction() {
// Set clang diagnostic handler. To do this we need to create a fake
// BackendConsumer.
- BackendConsumer Result(BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), CI.getLangOpts(), TheModule.get(),
+ BackendConsumer Result(BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
+ CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(), TheModule.get(),
std::move(LinkModules), *VMContext, nullptr);
// PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
// true here because the valued names are needed for reading textual IR.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 101080b6fe13..4e8e120d89df 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -96,16 +96,18 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
llvm_unreachable("invalid C++ ABI kind");
}
-CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
+CodeGenModule::CodeGenModule(ASTContext &C,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO,
const CodeGenOptions &CGO, llvm::Module &M,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
- : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
- PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
- Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
- VMContext(M.getContext()), Types(*this), VTables(*this),
- SanitizerMD(new SanitizerMetadata(*this)) {
+ : Context(C), LangOpts(C.getLangOpts()), FS(std::move(FS)),
+ HeaderSearchOpts(HSO), PreprocessorOpts(PPO), CodeGenOpts(CGO),
+ TheModule(M), Diags(diags), Target(C.getTargetInfo()),
+ ABI(createCXXABI(*this)), VMContext(M.getContext()), Types(*this),
+ VTables(*this), SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
@@ -795,18 +797,17 @@ void CodeGenModule::Release() {
Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
- getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
- LangOpts.BranchTargetEnforcement);
-
- getModule().addModuleFlag(llvm::Module::Min, "sign-return-address",
- LangOpts.hasSignReturnAddress());
-
- getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
- LangOpts.isSignReturnAddressScopeAll());
-
- getModule().addModuleFlag(llvm::Module::Min,
- "sign-return-address-with-bkey",
- !LangOpts.isSignReturnAddressWithAKey());
+ if (LangOpts.BranchTargetEnforcement)
+ getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
+ 1);
+ if (LangOpts.hasSignReturnAddress())
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+ if (LangOpts.isSignReturnAddressScopeAll())
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
+ 1);
+ if (!LangOpts.isSignReturnAddressWithAKey())
+ getModule().addModuleFlag(llvm::Module::Min,
+ "sign-return-address-with-bkey", 1);
}
if (!CodeGenOpts.MemoryProfileOutput.empty()) {
@@ -7000,4 +7001,6 @@ void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
"Still have (unmerged) EmittedDeferredDecls deferred decls");
NewBuilder->EmittedDeferredDecls = std::move(EmittedDeferredDecls);
+
+ NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index c939e7a309f5..5fbcc5ad1f5f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -47,6 +47,10 @@ class DataLayout;
class FunctionType;
class LLVMContext;
class IndexedInstrProfReader;
+
+namespace vfs {
+class FileSystem;
+}
}
namespace clang {
@@ -293,6 +297,7 @@ public:
private:
ASTContext &Context;
const LangOptions &LangOpts;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
const CodeGenOptions &CodeGenOpts;
@@ -584,7 +589,8 @@ private:
llvm::DenseMap<const llvm::Constant *, llvm::GlobalVariable *> RTTIProxyMap;
public:
- CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts,
+ CodeGenModule(ASTContext &C, IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &headersearchopts,
const PreprocessorOptions &ppopts,
const CodeGenOptions &CodeGenOpts, llvm::Module &M,
DiagnosticsEngine &Diags,
@@ -712,6 +718,9 @@ public:
ASTContext &getContext() const { return Context; }
const LangOptions &getLangOpts() const { return LangOpts; }
+ const IntrusiveRefCntPtr<llvm::vfs::FileSystem> &getFileSystem() const {
+ return FS;
+ }
const HeaderSearchOptions &getHeaderSearchOpts()
const { return HeaderSearchOpts; }
const PreprocessorOptions &getPreprocessorOpts()
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index f0003c4aab78..fc2ff15a6acd 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -707,8 +707,12 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
llvm::Value *VFPAddr =
Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
+ llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
+ ? llvm::Intrinsic::type_test
+ : llvm::Intrinsic::public_type_test;
+
CheckResult = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::type_test),
+ CGM.getIntrinsic(IID),
{Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 2bc1e8e8c5b9..f0c45654f8d9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -782,7 +782,7 @@ public:
LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) override;
- virtual bool
+ bool
isPermittedToBeHomogeneousAggregate(const CXXRecordDecl *RD) const override;
private:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
index 8e97a298ce7f..c9a5e56c72c7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -23,6 +23,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
using namespace clang;
@@ -32,6 +33,7 @@ namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
ASTContext *Ctx;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
@@ -74,11 +76,12 @@ namespace {
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO, const CodeGenOptions &CGO,
llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr)
- : Diags(diags), Ctx(nullptr), HeaderSearchOpts(HSO),
+ : Diags(diags), Ctx(nullptr), FS(std::move(FS)), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), HandlingTopLevelDecls(0),
CoverageInfo(CoverageInfo),
M(new llvm::Module(ExpandModuleName(ModuleName, CGO), C)) {
@@ -158,7 +161,7 @@ namespace {
if (auto TVSDKVersion =
Ctx->getTargetInfo().getDarwinTargetVariantSDKVersion())
M->setDarwinTargetVariantSDKVersion(*TVSDKVersion);
- Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
+ Builder.reset(new CodeGen::CodeGenModule(Context, FS, HeaderSearchOpts,
PreprocessorOpts, CodeGenOpts,
*M, Diags, CoverageInfo));
@@ -356,11 +359,14 @@ llvm::Module *CodeGenerator::StartModule(llvm::StringRef ModuleName,
return static_cast<CodeGeneratorImpl*>(this)->StartModule(ModuleName, C);
}
-CodeGenerator *clang::CreateLLVMCodeGen(
- DiagnosticsEngine &Diags, llvm::StringRef ModuleName,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PreprocessorOpts, const CodeGenOptions &CGO,
- llvm::LLVMContext &C, CoverageSourceInfo *CoverageInfo) {
- return new CodeGeneratorImpl(Diags, ModuleName, HeaderSearchOpts,
- PreprocessorOpts, CGO, C, CoverageInfo);
+CodeGenerator *
+clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags, llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PreprocessorOpts,
+ const CodeGenOptions &CGO, llvm::LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo) {
+ return new CodeGeneratorImpl(Diags, ModuleName, std::move(FS),
+ HeaderSearchOpts, PreprocessorOpts, CGO, C,
+ CoverageInfo);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index d03e5bd50873..f6eaa35b4873 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -45,6 +45,7 @@ class PCHContainerGenerator : public ASTConsumer {
const std::string OutputFileName;
ASTContext *Ctx;
ModuleMap &MMap;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
const HeaderSearchOptions &HeaderSearchOpts;
const PreprocessorOptions &PreprocessorOpts;
CodeGenOptions CodeGenOpts;
@@ -144,6 +145,7 @@ public:
: Diags(CI.getDiagnostics()), MainFileName(MainFileName),
OutputFileName(OutputFileName), Ctx(nullptr),
MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
+ FS(&CI.getVirtualFileSystem()),
HeaderSearchOpts(CI.getHeaderSearchOpts()),
PreprocessorOpts(CI.getPreprocessorOpts()),
TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()),
@@ -173,7 +175,7 @@ public:
M.reset(new llvm::Module(MainFileName, *VMContext));
M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
Builder.reset(new CodeGen::CodeGenModule(
- *Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
+ *Ctx, FS, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
// Prepare CGDebugInfo to emit debug info for a clang module.
auto *DI = Builder->getModuleDebugInfo();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
index bcad32ce31df..f5dd0e503cc0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
@@ -20,7 +20,6 @@
namespace llvm {
class GlobalVariable;
class Instruction;
-class MDNode;
} // namespace llvm
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 5c49db2f0837..a64909d9a6e7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -548,6 +548,11 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (CPUArg)
checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, ExtensionFeatures,
Triple, CPUArgFPUID);
+
+ // TODO Handle -mtune=. Suppress -Wunused-command-line-argument as a
+ // longstanding behavior.
+ (void)Args.getLastArg(options::OPT_mtune_EQ);
+
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
unsigned FPUID = llvm::ARM::FK_INVALID;
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index bcaecf4b2d98..7817ec595ceb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -107,6 +107,10 @@ const char *ppc::getPPCAsmModeForCPU(StringRef Name) {
void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
+ // TODO Handle -mtune=. Suppress -Wunused-command-line-argument as a
+ // longstanding behavior.
+ (void)Args.getLastArg(options::OPT_mtune_EQ);
+
if (Triple.getSubArch() == llvm::Triple::PPCSubArch_spe)
Features.push_back("+spe");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index 3044c2d92d21..b62a025c5072 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -5112,6 +5112,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi=vec-default");
}
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_quadword_atomics)) {
+ if (!Triple.isOSAIX() || Triple.isPPC32())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << RawTriple.str();
+ CmdArgs.push_back("-mabi=quadword-atomics");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_mlong_double_128)) {
// Emit the unsupported option error until the Clang's library integration
// support for 128-bit long double is available for AIX.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index e5451c20a00c..e49e8b0bf7d1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -489,12 +489,14 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Vptr;
- if (IsX86_64 || IsMIPS64) {
+ if (IsAArch64 || IsX86_64 || IsMIPS64) {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Thread;
}
if (IsX86 || IsX86_64) {
Res |= SanitizerKind::Function;
+ }
+ if (IsAArch64 || IsX86 || IsX86_64) {
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
@@ -502,8 +504,6 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
if (IsAArch64 || IsX86_64) {
Res |= SanitizerKind::KernelAddress;
Res |= SanitizerKind::KernelMemory;
- }
- if (IsX86_64) {
Res |= SanitizerKind::Memory;
}
return Res;
diff --git a/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp b/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
index ee57660b8c72..a3386b2489b0 100644
--- a/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
@@ -84,11 +84,11 @@ bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
deconstructMacroArgLoc(OrigLoc, ExpLoc, ArgUse);
auto I = ExpansionToArgMap.find(ExpLoc);
if (I != ExpansionToArgMap.end() &&
- find_if(I->second, [&](const MacroArgUse &U) {
+ llvm::any_of(I->second, [&](const MacroArgUse &U) {
return ArgUse.Identifier == U.Identifier &&
std::tie(ArgUse.ImmediateExpansionLoc, ArgUse.UseLoc) !=
std::tie(U.ImmediateExpansionLoc, U.UseLoc);
- }) != I->second.end()) {
+ })) {
// Trying to write in a macro argument input that has already been
// written by a previous commit for another expansion of the same macro
// argument name. For example:
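The change above swaps a find_if-against-end comparison for a predicate query. A small sketch of the same idiom with the standard algorithm (std::any_of here, where the patch uses the range-based llvm::any_of):

    #include <algorithm>
    #include <vector>

    bool hasNegative(const std::vector<int> &Values) {
      // States the intent directly instead of comparing an iterator to end().
      return std::any_of(Values.begin(), Values.end(),
                         [](int V) { return V < 0; });
    }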
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index 66f03dcb53a1..3f9b68ccbb39 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -239,55 +239,6 @@ bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
if (Tokens.size() < 2)
return false;
- // Interpolated strings could contain { } with " characters inside.
- // $"{x ?? "null"}"
- // should not be split into $"{x ?? ", null, "}" but should treated as a
- // single string-literal.
- //
- // We opt not to try and format expressions inside {} within a C#
- // interpolated string. Formatting expressions within an interpolated string
- // would require similar work as that done for JavaScript template strings
- // in `handleTemplateStrings()`.
- auto &CSharpInterpolatedString = *(Tokens.end() - 2);
- if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
- (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
- CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
- int UnmatchedOpeningBraceCount = 0;
-
- auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
- for (size_t Index = 0; Index < TokenTextSize; ++Index) {
- char C = CSharpInterpolatedString->TokenText[Index];
- if (C == '{') {
- // "{{" inside an interpolated string is an escaped '{' so skip it.
- if (Index + 1 < TokenTextSize &&
- CSharpInterpolatedString->TokenText[Index + 1] == '{') {
- ++Index;
- continue;
- }
- ++UnmatchedOpeningBraceCount;
- } else if (C == '}') {
- // "}}" inside an interpolated string is an escaped '}' so skip it.
- if (Index + 1 < TokenTextSize &&
- CSharpInterpolatedString->TokenText[Index + 1] == '}') {
- ++Index;
- continue;
- }
- --UnmatchedOpeningBraceCount;
- }
- }
-
- if (UnmatchedOpeningBraceCount > 0) {
- auto &NextToken = *(Tokens.end() - 1);
- CSharpInterpolatedString->TokenText =
- StringRef(CSharpInterpolatedString->TokenText.begin(),
- NextToken->TokenText.end() -
- CSharpInterpolatedString->TokenText.begin());
- CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
- Tokens.erase(Tokens.end() - 1);
- return true;
- }
- }
-
// Look for @"aaaaaa" or $"aaaaaa".
auto &String = *(Tokens.end() - 1);
if (!String->is(tok::string_literal))
@@ -571,45 +522,105 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
+static auto lexCSharpString(const char *Begin, const char *End, bool Verbatim,
+ bool Interpolated) {
+ auto Repeated = [&Begin, End]() {
+ return Begin + 1 < End && Begin[1] == Begin[0];
+ };
+
+ // Look for a terminating '"' in the current file buffer.
+ // Make no effort to format code within an interpolated or verbatim string.
+ //
+ // Interpolated strings could contain { } with " characters inside.
+ // $"{x ?? "null"}"
+ // should not be split into $"{x ?? ", null, "}" but should be treated as a
+ // single string-literal.
+ //
+ // We opt not to try and format expressions inside {} within a C#
+ // interpolated string. Formatting expressions within an interpolated string
+ // would require similar work as that done for JavaScript template strings
+ // in `handleTemplateStrings()`.
+ for (int UnmatchedOpeningBraceCount = 0; Begin < End; ++Begin) {
+ switch (*Begin) {
+ case '\\':
+ if (!Verbatim)
+ ++Begin;
+ break;
+ case '{':
+ if (Interpolated) {
+ // {{ inside an interpolated string is escaped, so skip it.
+ if (Repeated())
+ ++Begin;
+ else
+ ++UnmatchedOpeningBraceCount;
+ }
+ break;
+ case '}':
+ if (Interpolated) {
+ // }} inside an interpolated string is escaped, so skip it.
+ if (Repeated())
+ ++Begin;
+ else if (UnmatchedOpeningBraceCount > 0)
+ --UnmatchedOpeningBraceCount;
+ else
+ return End;
+ }
+ break;
+ case '"':
+ if (UnmatchedOpeningBraceCount > 0)
+ break;
+ // "" within a verbatim string is an escaped double quote: skip it.
+ if (Verbatim && Repeated()) {
+ ++Begin;
+ break;
+ }
+ return Begin;
+ }
+ }
+
+ return End;
+}
+
void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
FormatToken *CSharpStringLiteral = Tokens.back();
- if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
+ if (CSharpStringLiteral->isNot(TT_CSharpStringLiteral))
return;
+ auto &TokenText = CSharpStringLiteral->TokenText;
+
+ bool Verbatim = false;
+ bool Interpolated = false;
+ if (TokenText.startswith(R"($@")")) {
+ Verbatim = true;
+ Interpolated = true;
+ } else if (TokenText.startswith(R"(@")")) {
+ Verbatim = true;
+ } else if (TokenText.startswith(R"($")")) {
+ Interpolated = true;
+ }
+
// Deal with multiline strings.
- if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
- CSharpStringLiteral->TokenText.startswith(R"($@")"))) {
+ if (!Verbatim && !Interpolated)
return;
- }
- const char *StrBegin =
- Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
+ const char *StrBegin = Lex->getBufferLocation() - TokenText.size();
const char *Offset = StrBegin;
- if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
- Offset += 2;
- else // CSharpStringLiteral->TokenText.startswith(R"($@")")
+ if (Verbatim && Interpolated)
Offset += 3;
+ else
+ Offset += 2;
- // Look for a terminating '"' in the current file buffer.
- // Make no effort to format code within an interpolated or verbatim string.
- for (; Offset != Lex->getBuffer().end(); ++Offset) {
- if (Offset[0] == '"') {
- // "" within a verbatim string is an escaped double quote: skip it.
- if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
- ++Offset;
- else
- break;
- }
- }
+ const auto End = Lex->getBuffer().end();
+ Offset = lexCSharpString(Offset, End, Verbatim, Interpolated);
// Make no attempt to format code properly if a verbatim string is
// unterminated.
- if (Offset == Lex->getBuffer().end())
+ if (Offset >= End)
return;
StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
- CSharpStringLiteral->TokenText = LiteralText;
+ TokenText = LiteralText;
// Adjust width for potentially multiline string literals.
size_t FirstBreak = LiteralText.find('\n');
@@ -628,10 +639,8 @@ void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
StartColumn, Style.TabWidth, Encoding);
}
- SourceLocation loc = Offset < Lex->getBuffer().end()
- ? Lex->getSourceLocation(Offset + 1)
- : SourceMgr.getLocForEndOfFile(ID);
- resetLexer(SourceMgr.getFileOffset(loc));
+ assert(Offset < End);
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset + 1)));
}
void FormatTokenLexer::handleTemplateStrings() {
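[Note on the FormatTokenLexer change above] The quote-scanning loop that used to live inline in handleCSharpVerbatimAndInterpolatedStrings() is now the file-local helper lexCSharpString(), extended to track unmatched '{' in interpolated strings and doubled-quote escapes in verbatim ones. The sketch below is not clang code: it reimplements the same scanning rules as a standalone program (findClosingQuote() and its test strings are illustrative inventions) so the cases named in the comments can be checked in isolation.

// Standalone sketch of the lexCSharpString() scanning rules. Returns the
// index of the terminating '"', or std::string::npos when the literal is
// treated as unterminated (which is also how a stray '}' is handled).
#include <cassert>
#include <cstddef>
#include <string>

static size_t findClosingQuote(const std::string &Text, size_t Begin,
                               bool Verbatim, bool Interpolated) {
  int UnmatchedOpeningBraceCount = 0;
  for (size_t I = Begin; I < Text.size(); ++I) {
    const bool Repeated = I + 1 < Text.size() && Text[I + 1] == Text[I];
    switch (Text[I]) {
    case '\\':
      if (!Verbatim) // backslash escapes exist only in non-verbatim strings
        ++I;
      break;
    case '{':
      if (Interpolated) {
        if (Repeated) // "{{" is an escaped brace
          ++I;
        else
          ++UnmatchedOpeningBraceCount;
      }
      break;
    case '}':
      if (Interpolated) {
        if (Repeated) // "}}" is an escaped brace
          ++I;
        else if (UnmatchedOpeningBraceCount > 0)
          --UnmatchedOpeningBraceCount;
        else
          return std::string::npos; // stray '}': give up, like the real lexer
      }
      break;
    case '"':
      if (UnmatchedOpeningBraceCount > 0)
        break; // a quote inside an interpolation hole does not end the literal
      if (Verbatim && Repeated) {
        ++I; // "" is an escaped quote in a verbatim string
        break;
      }
      return I;
    }
  }
  return std::string::npos;
}

int main() {
  // $"{x ?? "null"}" stays one literal: the inner quotes sit inside an
  // unmatched '{' ... '}' pair, so the scan keeps going.
  const std::string A = R"*($"{x ?? "null"}")*";
  assert(findClosingQuote(A, 2, /*Verbatim=*/false, /*Interpolated=*/true) ==
         A.size() - 1);

  // @"a ""b"" c": the doubled quotes are escapes, so the scan runs to the
  // final quote instead of stopping early.
  const std::string B = R"*(@"a ""b"" c")*";
  assert(findClosingQuote(B, 2, /*Verbatim=*/true, /*Interpolated=*/false) ==
         B.size() - 1);
  return 0;
}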
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index 48cd6a394107..2dd96e68bb92 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -1926,6 +1926,12 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.EnableAIXExtendedAltivecABI = O.matches(OPT_mabi_EQ_vec_extabi);
}
+ if (Arg *A = Args.getLastArg(OPT_mabi_EQ_quadword_atomics)) {
+ if (!T.isOSAIX() || T.isPPC32())
+ Diags.Report(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << T.str();
+ }
+
bool NeedLocTracking = false;
if (!Opts.OptRecordFile.empty())
diff --git a/contrib/llvm-project/clang/lib/Headers/stdatomic.h b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
index 3a0b9cc056be..318c7ca56e41 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdatomic.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
@@ -17,7 +17,8 @@
* explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback
* to the clang resource header until that is fully supported.
*/
-#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>) && !defined(_MSC_VER)
+#if __STDC_HOSTED__ && \
+ __has_include_next(<stdatomic.h>) && !(defined(_MSC_VER) && !defined(__cplusplus))
# include_next <stdatomic.h>
#else
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
index f11ec0aa9e75..5b0f982b62dd 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
@@ -21,7 +21,6 @@
namespace llvm {
class Error;
-class Module;
namespace orc {
class LLJIT;
class ThreadSafeContext;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 2f21b7b2fef0..aef9909a7c97 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -6344,23 +6344,27 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
diag::err_expected_member_name_or_semi)
<< (D.getDeclSpec().isEmpty() ? SourceRange()
: D.getDeclSpec().getSourceRange());
- } else if (getLangOpts().CPlusPlus) {
- if (Tok.isOneOf(tok::period, tok::arrow))
- Diag(Tok, diag::err_invalid_operator_on_type) << Tok.is(tok::arrow);
- else {
- SourceLocation Loc = D.getCXXScopeSpec().getEndLoc();
- if (Tok.isAtStartOfLine() && Loc.isValid())
- Diag(PP.getLocForEndOfToken(Loc), diag::err_expected_unqualified_id)
- << getLangOpts().CPlusPlus;
- else
- Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
- diag::err_expected_unqualified_id)
- << getLangOpts().CPlusPlus;
- }
} else {
- Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
- diag::err_expected_either)
- << tok::identifier << tok::l_paren;
+ if (Tok.getKind() == tok::TokenKind::kw_while) {
+ Diag(Tok, diag::err_while_loop_outside_of_a_function);
+ } else if (getLangOpts().CPlusPlus) {
+ if (Tok.isOneOf(tok::period, tok::arrow))
+ Diag(Tok, diag::err_invalid_operator_on_type) << Tok.is(tok::arrow);
+ else {
+ SourceLocation Loc = D.getCXXScopeSpec().getEndLoc();
+ if (Tok.isAtStartOfLine() && Loc.isValid())
+ Diag(PP.getLocForEndOfToken(Loc), diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ else
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ }
+ } else {
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_either)
+ << tok::identifier << tok::l_paren;
+ }
}
D.SetIdentifier(nullptr, Tok.getLocation());
D.setInvalidType(true);
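[Note on the ParseDecl.cpp hunk above] The restructured else-branch adds one new case ahead of the existing recovery paths: if the token sitting where a declarator should start is the 'while' keyword, the parser emits err_while_loop_outside_of_a_function instead of falling through to the generic expected-unqualified-id / expected-identifier-or-'(' diagnostics. A deliberately ill-formed input that reaches the new branch would look roughly like this (diagnostic wording paraphrased, not quoted from the .td file):

// Loop statement at file scope -- not a declaration, so ParseDirectDeclarator()
// now sees kw_while where a declarator-id was expected and reports the
// dedicated diagnostic.
int counter = 0;

while (counter < 10) { // error: while loop outside of a function (paraphrased)
  ++counter;
}

int main() { return counter; }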
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index 143b373e9ea5..bf73ddfd1031 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -60,7 +60,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc) {
assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
- SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
+ SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::code_completion)) {
@@ -99,7 +99,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
- IdentLoc = ConsumeToken(); // eat the identifier.
+ IdentLoc = ConsumeToken(); // eat the identifier.
while (Tok.is(tok::coloncolon) &&
(NextToken().is(tok::identifier) ||
(NextToken().is(tok::kw_inline) &&
@@ -199,7 +199,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
std::string RBraces;
for (unsigned i = 0, e = ExtraNSs.size(); i != e; ++i)
- RBraces += "} ";
+ RBraces += "} ";
Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
<< FixItHint::CreateReplacement(
@@ -216,8 +216,9 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// If we're still good, complain about inline namespaces in non-C++0x now.
if (InlineLoc.isValid())
- Diag(InlineLoc, getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_inline_namespace : diag::ext_inline_namespace);
+ Diag(InlineLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_inline_namespace
+ : diag::ext_inline_namespace);
// Enter a scope for the namespace.
ParseScope NamespaceScope(this, Scope::DeclScope);
@@ -496,7 +497,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDirectiveOrDeclaration(
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
Diag(UsingLoc, diag::err_templated_using_directive_declaration)
- << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
+ << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
}
Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, Attrs);
@@ -651,9 +652,9 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
}
if (TryConsumeToken(tok::ellipsis, D.EllipsisLoc))
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17 ?
- diag::warn_cxx17_compat_using_declaration_pack :
- diag::ext_using_declaration_pack);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
+ ? diag::warn_cxx17_compat_using_declaration_pack
+ : diag::ext_using_declaration_pack);
return false;
}
@@ -766,7 +767,7 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
Diag(UsingLoc, diag::err_templated_using_directive_declaration)
- << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
// Unfortunately, we have to bail out instead of recovering by
// ignoring the parameters, just in case the nested name specifier
@@ -811,9 +812,10 @@ Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
}
if (DeclsInGroup.size() > 1)
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17 ?
- diag::warn_cxx17_compat_multi_using_declaration :
- diag::ext_multi_using_declaration);
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus17
+ ? diag::warn_cxx17_compat_multi_using_declaration
+ : diag::ext_multi_using_declaration);
// Eat ';'.
DeclEnd = Tok.getLocation();
@@ -835,9 +837,9 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
return nullptr;
}
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_alias_declaration :
- diag::ext_alias_declaration);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_alias_declaration
+ : diag::ext_alias_declaration);
// Type alias templates cannot be specialized.
int SpecKind = -1;
@@ -856,7 +858,7 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
else
Range = TemplateInfo.getSourceRange();
Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
- << SpecKind << Range;
+ << SpecKind << Range;
SkipUntil(tok::semi);
return nullptr;
}
@@ -869,15 +871,15 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
return nullptr;
} else if (D.TypenameLoc.isValid())
Diag(D.TypenameLoc, diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(SourceRange(
- D.TypenameLoc,
- D.SS.isNotEmpty() ? D.SS.getEndLoc() : D.TypenameLoc));
+ << FixItHint::CreateRemoval(
+ SourceRange(D.TypenameLoc, D.SS.isNotEmpty() ? D.SS.getEndLoc()
+ : D.TypenameLoc));
else if (D.SS.isNotEmpty())
Diag(D.SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(D.SS.getRange());
+ << FixItHint::CreateRemoval(D.SS.getRange());
if (D.EllipsisLoc.isValid())
Diag(D.EllipsisLoc, diag::err_alias_declaration_pack_expansion)
- << FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
+ << FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
Decl *DeclFromDeclSpec = nullptr;
TypeResult TypeAlias =
@@ -897,8 +899,8 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
MultiTemplateParamsArg TemplateParamsArg(
- TemplateParams ? TemplateParams->data() : nullptr,
- TemplateParams ? TemplateParams->size() : 0);
+ TemplateParams ? TemplateParams->data() : nullptr,
+ TemplateParams ? TemplateParams->size() : 0);
return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
UsingLoc, D.Name, Attrs, TypeAlias,
DeclFromDeclSpec);
@@ -922,10 +924,13 @@ static FixItHint getStaticAssertNoMessageFixIt(const Expr *AssertExpr,
/// [C11] static_assert-declaration:
/// _Static_assert ( constant-expression , string-literal ) ;
///
-Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
+Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
assert(Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert) &&
"Not a static_assert declaration");
+ // Save the token used for static assertion.
+ Token SavedTok = Tok;
+
if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
Diag(Tok, diag::ext_c11_feature) << Tok.getName();
if (Tok.is(tok::kw_static_assert)) {
@@ -974,7 +979,7 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
if (!isTokenStringLiteral()) {
Diag(Tok, diag::err_expected_string_literal)
- << /*Source='static_assert'*/1;
+ << /*Source='static_assert'*/ 1;
SkipMalformedDecl();
return nullptr;
}
@@ -989,10 +994,11 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
T.consumeClose();
DeclEnd = Tok.getLocation();
- ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert);
+ // Passing the token used to the error message.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert,
+ SavedTok.getName());
- return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc,
- AssertExpr.get(),
+ return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc, AssertExpr.get(),
AssertMessage.get(),
T.getCloseLocation());
}
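[Note on ParseStaticAssertDeclaration above] The token that introduced the assertion is saved up front and its spelling (SavedTok.getName()) is forwarded to ExpectAndConsumeSemi, so when the trailing ';' is missing, err_expected_semi_after_static_assert can name whichever keyword was actually written; clang recognizes both spellings, as the kw_static_assert / kw__Static_assert handling above shows. A deliberately ill-formed snippet of the case this improves (exact diagnostic text lives in the diagnostic .td files and is only paraphrased here):

// Missing the trailing ';' after the assertion. With SavedTok.getName()
// threaded through, the diagnostic can refer to '_Static_assert' rather than
// only ever naming the 'static_assert' spelling (paraphrased).
_Static_assert(sizeof(long) >= 4, "long is at least 32 bits")

int main() { return 0; }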
@@ -1003,8 +1009,8 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
/// 'decltype' ( 'auto' ) [C++1y]
///
SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
- assert(Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)
- && "Not a decltype specifier");
+ assert(Tok.isOneOf(tok::kw_decltype, tok::annot_decltype) &&
+ "Not a decltype specifier");
ExprResult Result;
SourceLocation StartLoc = Tok.getLocation();
@@ -1028,11 +1034,11 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "decltype", tok::r_paren)) {
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "decltype",
+ tok::r_paren)) {
DS.SetTypeSpecError();
- return T.getOpenLocation() == Tok.getLocation() ?
- StartLoc : T.getOpenLocation();
+ return T.getOpenLocation() == Tok.getLocation() ? StartLoc
+ : T.getOpenLocation();
}
// Check for C++1y 'decltype(auto)'.
@@ -1041,8 +1047,8 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// be 'auto' since C++2b.
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus14
- ? diag::warn_cxx11_compat_decltype_auto_type_specifier
- : diag::ext_decltype_auto_type_specifier);
+ ? diag::warn_cxx11_compat_decltype_auto_type_specifier
+ : diag::ext_decltype_auto_type_specifier);
ConsumeToken();
} else {
// Parse the expression
@@ -1100,18 +1106,17 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
unsigned DiagID;
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
// Check for duplicate type specifiers (e.g. "int decltype(a)").
- if (Result.get()
- ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
- DiagID, Result.get(), Policy)
- : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc, PrevSpec,
- DiagID, Policy)) {
+ if (Result.get() ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc,
+ PrevSpec, DiagID, Result.get(), Policy)
+ : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc,
+ PrevSpec, DiagID, Policy)) {
Diag(StartLoc, DiagID) << PrevSpec;
DS.SetTypeSpecError();
}
return EndLoc;
}
-void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
+void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc) {
// make sure we have a token we can turn into an annotation token
@@ -1124,15 +1129,14 @@ void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
// semi-colon.
EndLoc = PP.getLastCachedTokenLocation();
}
- }
- else
- PP.EnterToken(Tok, /*IsReinject*/true);
+ } else
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::annot_decltype);
setExprAnnotation(Tok,
- DS.getTypeSpecType() == TST_decltype ? DS.getRepAsExpr() :
- DS.getTypeSpecType() == TST_decltype_auto ? ExprResult() :
- ExprError());
+ DS.getTypeSpecType() == TST_decltype ? DS.getRepAsExpr()
+ : DS.getTypeSpecType() == TST_decltype_auto ? ExprResult()
+ : ExprError());
Tok.setAnnotationEndLoc(EndLoc);
Tok.setLocation(StartLoc);
PP.AnnotateCachedTokens(Tok);
@@ -1144,8 +1148,8 @@ void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
SourceLocation StartLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "__underlying_type", tok::r_paren)) {
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__underlying_type",
+ tok::r_paren)) {
return;
}
@@ -1192,7 +1196,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Ignore attempts to use typename
if (Tok.is(tok::kw_typename)) {
Diag(Tok, diag::err_expected_class_name_not_template)
- << FixItHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
}
@@ -1211,7 +1215,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)) {
if (SS.isNotEmpty())
Diag(SS.getBeginLoc(), diag::err_unexpected_scope_on_base_decltype)
- << FixItHint::CreateRemoval(SS.getRange());
+ << FixItHint::CreateRemoval(SS.getRange());
// Fake up a Declarator to use with ActOnTypeName.
DeclSpec DS(AttrFactory);
@@ -1226,7 +1230,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->mightBeType()) {
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/ true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TypeResult Type = getTypeAnnotation(Tok);
@@ -1253,10 +1257,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// required nor permitted" mode, and do this there.
TemplateNameKind TNK = TNK_Non_template;
TemplateTy Template;
- if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
- &SS, Template, TNK)) {
- Diag(IdLoc, diag::err_unknown_template_name)
- << Id;
+ if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(), &SS,
+ Template, TNK)) {
+ Diag(IdLoc, diag::err_unknown_template_name) << Id;
}
// Form the template name
@@ -1269,7 +1272,7 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
return true;
if (Tok.is(tok::annot_template_id) &&
takeTemplateIdAnnotation(Tok)->mightBeType())
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/ true);
// If we didn't end up with a typename token, there's nothing more we
// can do.
@@ -1332,28 +1335,29 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
// This switch enumerates the valid "follow" set for type-specifiers.
switch (Tok.getKind()) {
- default: break;
- case tok::semi: // struct foo {...} ;
- case tok::star: // struct foo {...} * P;
- case tok::amp: // struct foo {...} & R = ...
- case tok::ampamp: // struct foo {...} && R = ...
- case tok::identifier: // struct foo {...} V ;
- case tok::r_paren: //(struct foo {...} ) {4}
- case tok::coloncolon: // struct foo {...} :: a::b;
- case tok::annot_cxxscope: // struct foo {...} a:: b;
- case tok::annot_typename: // struct foo {...} a ::b;
- case tok::annot_template_id: // struct foo {...} a<int> ::b;
- case tok::kw_decltype: // struct foo {...} decltype (a)::b;
- case tok::l_paren: // struct foo {...} ( x);
- case tok::comma: // __builtin_offsetof(struct foo{...} ,
- case tok::kw_operator: // struct foo operator ++() {...}
- case tok::kw___declspec: // struct foo {...} __declspec(...)
- case tok::l_square: // void f(struct f [ 3])
- case tok::ellipsis: // void f(struct f ... [Ns])
+ default:
+ break;
+ case tok::semi: // struct foo {...} ;
+ case tok::star: // struct foo {...} * P;
+ case tok::amp: // struct foo {...} & R = ...
+ case tok::ampamp: // struct foo {...} && R = ...
+ case tok::identifier: // struct foo {...} V ;
+ case tok::r_paren: //(struct foo {...} ) {4}
+ case tok::coloncolon: // struct foo {...} :: a::b;
+ case tok::annot_cxxscope: // struct foo {...} a:: b;
+ case tok::annot_typename: // struct foo {...} a ::b;
+ case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::kw_decltype: // struct foo {...} decltype (a)::b;
+ case tok::l_paren: // struct foo {...} ( x);
+ case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ case tok::kw_operator: // struct foo operator ++() {...}
+ case tok::kw___declspec: // struct foo {...} __declspec(...)
+ case tok::l_square: // void f(struct f [ 3])
+ case tok::ellipsis: // void f(struct f ... [Ns])
// FIXME: we should emit semantic diagnostic when declaration
// attribute is in type attribute position.
- case tok::kw___attribute: // struct foo __attribute__((used)) x;
- case tok::annot_pragma_pack: // struct foo {...} _Pragma(pack(pop));
+ case tok::kw___attribute: // struct foo __attribute__((used)) x;
+ case tok::annot_pragma_pack: // struct foo {...} _Pragma(pack(pop));
// struct foo {...} _Pragma(section(...));
case tok::annot_pragma_ms_pragma:
// struct foo {...} _Pragma(vtordisp(pop));
@@ -1362,40 +1366,40 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::annot_pragma_ms_pointers_to_members:
return true;
case tok::colon:
- return CouldBeBitfield || // enum E { ... } : 2;
- ColonIsSacred; // _Generic(..., enum E : 2);
+ return CouldBeBitfield || // enum E { ... } : 2;
+ ColonIsSacred; // _Generic(..., enum E : 2);
// Microsoft compatibility
- case tok::kw___cdecl: // struct foo {...} __cdecl x;
- case tok::kw___fastcall: // struct foo {...} __fastcall x;
- case tok::kw___stdcall: // struct foo {...} __stdcall x;
- case tok::kw___thiscall: // struct foo {...} __thiscall x;
- case tok::kw___vectorcall: // struct foo {...} __vectorcall x;
+ case tok::kw___cdecl: // struct foo {...} __cdecl x;
+ case tok::kw___fastcall: // struct foo {...} __fastcall x;
+ case tok::kw___stdcall: // struct foo {...} __stdcall x;
+ case tok::kw___thiscall: // struct foo {...} __thiscall x;
+ case tok::kw___vectorcall: // struct foo {...} __vectorcall x;
// We will diagnose these calling-convention specifiers on non-function
// declarations later, so claim they are valid after a type specifier.
return getLangOpts().MicrosoftExt;
// Type qualifiers
- case tok::kw_const: // struct foo {...} const x;
- case tok::kw_volatile: // struct foo {...} volatile x;
- case tok::kw_restrict: // struct foo {...} restrict x;
- case tok::kw__Atomic: // struct foo {...} _Atomic x;
- case tok::kw___unaligned: // struct foo {...} __unaligned *x;
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw__Atomic: // struct foo {...} _Atomic x;
+ case tok::kw___unaligned: // struct foo {...} __unaligned *x;
// Function specifiers
// Note, no 'explicit'. An explicit function must be either a conversion
// operator or a constructor. Either way, it can't have a return type.
- case tok::kw_inline: // struct foo inline f();
- case tok::kw_virtual: // struct foo virtual f();
- case tok::kw_friend: // struct foo friend f();
+ case tok::kw_inline: // struct foo inline f();
+ case tok::kw_virtual: // struct foo virtual f();
+ case tok::kw_friend: // struct foo friend f();
// Storage-class specifiers
- case tok::kw_static: // struct foo {...} static x;
- case tok::kw_extern: // struct foo {...} extern x;
- case tok::kw_typedef: // struct foo {...} typedef x;
- case tok::kw_register: // struct foo {...} register x;
- case tok::kw_auto: // struct foo {...} auto x;
- case tok::kw_mutable: // struct foo {...} mutable x;
- case tok::kw_thread_local: // struct foo {...} thread_local x;
- case tok::kw_constexpr: // struct foo {...} constexpr x;
- case tok::kw_consteval: // struct foo {...} consteval x;
- case tok::kw_constinit: // struct foo {...} constinit x;
+ case tok::kw_static: // struct foo {...} static x;
+ case tok::kw_extern: // struct foo {...} extern x;
+ case tok::kw_typedef: // struct foo {...} typedef x;
+ case tok::kw_register: // struct foo {...} register x;
+ case tok::kw_auto: // struct foo {...} auto x;
+ case tok::kw_mutable: // struct foo {...} mutable x;
+ case tok::kw_thread_local: // struct foo {...} thread_local x;
+ case tok::kw_constexpr: // struct foo {...} constexpr x;
+ case tok::kw_consteval: // struct foo {...} consteval x;
+ case tok::kw_constinit: // struct foo {...} constinit x;
// As shown above, type qualifiers and storage class specifiers absolutely
// can occur after class specifiers according to the grammar. However,
// almost no one actually writes code like this. If we see one of these,
@@ -1414,7 +1418,7 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
if (!isKnownToBeTypeSpecifier(NextToken()))
return true;
break;
- case tok::r_brace: // struct bar { struct foo {...} }
+ case tok::r_brace: // struct bar { struct foo {...} }
// Missing ';' at end of struct is accepted as an extension in C mode.
if (!getLangOpts().CPlusPlus)
return true;
@@ -1507,8 +1511,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
// Parse inheritance specifiers.
- if (Tok.isOneOf(tok::kw___single_inheritance,
- tok::kw___multiple_inheritance,
+ if (Tok.isOneOf(tok::kw___single_inheritance, tok::kw___multiple_inheritance,
tok::kw___virtual_inheritance))
ParseMicrosoftInheritanceClassAttributes(attrs);
@@ -1519,61 +1522,32 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// C++11 attributes
SourceLocation AttrFixitLoc = Tok.getLocation();
- if (TagType == DeclSpec::TST_struct &&
- Tok.isNot(tok::identifier) &&
- !Tok.isAnnotation() &&
- Tok.getIdentifierInfo() &&
- Tok.isOneOf(tok::kw___is_abstract,
- tok::kw___is_aggregate,
- tok::kw___is_arithmetic,
- tok::kw___is_array,
- tok::kw___is_assignable,
- tok::kw___is_base_of,
- tok::kw___is_class,
- tok::kw___is_complete_type,
- tok::kw___is_compound,
- tok::kw___is_const,
- tok::kw___is_constructible,
- tok::kw___is_convertible,
- tok::kw___is_convertible_to,
- tok::kw___is_destructible,
- tok::kw___is_empty,
- tok::kw___is_enum,
- tok::kw___is_floating_point,
- tok::kw___is_final,
- tok::kw___is_function,
- tok::kw___is_fundamental,
- tok::kw___is_integral,
- tok::kw___is_interface_class,
- tok::kw___is_literal,
- tok::kw___is_lvalue_expr,
- tok::kw___is_lvalue_reference,
- tok::kw___is_member_function_pointer,
- tok::kw___is_member_object_pointer,
- tok::kw___is_member_pointer,
- tok::kw___is_nothrow_assignable,
- tok::kw___is_nothrow_constructible,
- tok::kw___is_nothrow_destructible,
- tok::kw___is_object,
- tok::kw___is_pod,
- tok::kw___is_pointer,
- tok::kw___is_polymorphic,
- tok::kw___is_reference,
- tok::kw___is_rvalue_expr,
- tok::kw___is_rvalue_reference,
- tok::kw___is_same,
- tok::kw___is_scalar,
- tok::kw___is_sealed,
- tok::kw___is_signed,
- tok::kw___is_standard_layout,
- tok::kw___is_trivial,
- tok::kw___is_trivially_assignable,
- tok::kw___is_trivially_constructible,
- tok::kw___is_trivially_copyable,
- tok::kw___is_union,
- tok::kw___is_unsigned,
- tok::kw___is_void,
- tok::kw___is_volatile))
+ if (TagType == DeclSpec::TST_struct && Tok.isNot(tok::identifier) &&
+ !Tok.isAnnotation() && Tok.getIdentifierInfo() &&
+ Tok.isOneOf(
+ tok::kw___is_abstract, tok::kw___is_aggregate,
+ tok::kw___is_arithmetic, tok::kw___is_array, tok::kw___is_assignable,
+ tok::kw___is_base_of, tok::kw___is_class, tok::kw___is_complete_type,
+ tok::kw___is_compound, tok::kw___is_const, tok::kw___is_constructible,
+ tok::kw___is_convertible, tok::kw___is_convertible_to,
+ tok::kw___is_destructible, tok::kw___is_empty, tok::kw___is_enum,
+ tok::kw___is_floating_point, tok::kw___is_final,
+ tok::kw___is_function, tok::kw___is_fundamental,
+ tok::kw___is_integral, tok::kw___is_interface_class,
+ tok::kw___is_literal, tok::kw___is_lvalue_expr,
+ tok::kw___is_lvalue_reference, tok::kw___is_member_function_pointer,
+ tok::kw___is_member_object_pointer, tok::kw___is_member_pointer,
+ tok::kw___is_nothrow_assignable, tok::kw___is_nothrow_constructible,
+ tok::kw___is_nothrow_destructible, tok::kw___is_object,
+ tok::kw___is_pod, tok::kw___is_pointer, tok::kw___is_polymorphic,
+ tok::kw___is_reference, tok::kw___is_rvalue_expr,
+ tok::kw___is_rvalue_reference, tok::kw___is_same, tok::kw___is_scalar,
+ tok::kw___is_sealed, tok::kw___is_signed,
+ tok::kw___is_standard_layout, tok::kw___is_trivial,
+ tok::kw___is_trivially_assignable,
+ tok::kw___is_trivially_constructible, tok::kw___is_trivially_copyable,
+ tok::kw___is_union, tok::kw___is_unsigned, tok::kw___is_void,
+ tok::kw___is_volatile))
// GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
// name of struct templates, but some are keywords in GCC >= 4.3
// and Clang. Therefore, when we see the token sequence "struct
@@ -1767,7 +1741,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// C++ [class.friend]p2:
// A class shall not be defined in a friend declaration.
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
- << SourceRange(DS.getFriendSpecLoc());
+ << SourceRange(DS.getFriendSpecLoc());
// Skip everything up to the semicolon, so that this looks like a proper
// friend class (or template thereof) declaration.
@@ -1821,7 +1795,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after,
DeclSpec::getSpecifierName(TagType, PPol));
- PP.EnterToken(Tok, /*IsReinject*/true);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::semi);
}
} else
@@ -1838,10 +1812,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SourceRange AttrRange = Attributes.Range;
if (AttrRange.isValid()) {
Diag(AttrRange.getBegin(), diag::err_attributes_not_allowed)
- << AttrRange
- << FixItHint::CreateInsertionFromRange(AttrFixitLoc,
- CharSourceRange(AttrRange, true))
- << FixItHint::CreateRemoval(AttrRange);
+ << AttrRange
+ << FixItHint::CreateInsertionFromRange(
+ AttrFixitLoc, CharSourceRange(AttrRange, true))
+ << FixItHint::CreateRemoval(AttrRange);
// Recover by adding misplaced attributes to the attribute list
// of the class so they can be applied on the class later.
@@ -1849,12 +1823,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
- if (!Name && !TemplateId && (DS.getTypeSpecType() == DeclSpec::TST_error ||
- TUK != Sema::TUK_Definition)) {
+ if (!Name && !TemplateId &&
+ (DS.getTypeSpecType() == DeclSpec::TST_error ||
+ TUK != Sema::TUK_Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
// We have a declaration or reference to an anonymous class.
Diag(StartLoc, diag::err_anon_type_definition)
- << DeclSpec::getSpecifierName(TagType, Policy);
+ << DeclSpec::getSpecifierName(TagType, Policy);
}
// If we are parsing a definition and stop at a base-clause, continue on
@@ -1869,7 +1844,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Create the tag portion of the class or class template.
DeclResult TagOrTempResult = true; // invalid
- TypeResult TypeResult = true; // invalid
+ TypeResult TypeResult = true; // invalid
bool Owned = false;
Sema::SkipBodyInfo SkipBody;
@@ -1881,7 +1856,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId->isInvalid()) {
// Can't build the declaration.
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
@@ -1901,14 +1876,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
- TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc);
+ TypeResult = Actions.ActOnTagTemplateIdType(
+ TUK, TagType, StartLoc, SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc);
} else {
// This is an explicit specialization or a class template
// partial specialization.
@@ -1990,7 +1961,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
Diag(Tok, diag::err_template_defn_explicit_instantiation)
- << 1 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
+ << 1 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
TemplateParams = nullptr;
}
@@ -2002,7 +1973,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MultiTemplateParamsArg TParams;
if (TUK != Sema::TUK_Reference && TemplateParams)
TParams =
- MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
+ MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
@@ -2020,8 +1991,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// less common call.
if (IsDependent) {
assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
- TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
- SS, Name, StartLoc, NameLoc);
+ TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS,
+ Name, StartLoc, NameLoc);
}
}
@@ -2071,10 +2042,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
NameLoc.isValid() ? NameLoc : StartLoc,
PrevSpec, DiagID, TypeResult.get(), Policy);
} else if (!TagOrTempResult.isInvalid()) {
- Result = DS.SetTypeSpecType(TagType, StartLoc,
- NameLoc.isValid() ? NameLoc : StartLoc,
- PrevSpec, DiagID, TagOrTempResult.get(), Owned,
- Policy);
+ Result = DS.SetTypeSpecType(
+ TagType, StartLoc, NameLoc.isValid() ? NameLoc : StartLoc, PrevSpec,
+ DiagID, TagOrTempResult.get(), Owned, Policy);
} else {
DS.SetTypeSpecError();
return;
@@ -2183,12 +2153,12 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
// Parse the 'virtual' keyword (again!), in case it came after the
// access specifier.
- if (Tok.is(tok::kw_virtual)) {
+ if (Tok.is(tok::kw_virtual)) {
SourceLocation VirtualLoc = ConsumeToken();
if (IsVirtual) {
// Complain about duplicate 'virtual'
Diag(VirtualLoc, diag::err_dup_virtual)
- << FixItHint::CreateRemoval(VirtualLoc);
+ << FixItHint::CreateRemoval(VirtualLoc);
}
IsVirtual = true;
@@ -2237,10 +2207,14 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
/// 'public'
AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
switch (Tok.getKind()) {
- default: return AS_none;
- case tok::kw_private: return AS_private;
- case tok::kw_protected: return AS_protected;
- case tok::kw_public: return AS_public;
+ default:
+ return AS_none;
+ case tok::kw_private:
+ return AS_private;
+ case tok::kw_protected:
+ return AS_protected;
+ case tok::kw_public:
+ return AS_public;
}
}
@@ -2248,10 +2222,9 @@ AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
/// delayed, e.g., default arguments or an exception-specification, create a
/// late-parsed method declaration record to handle the parsing at the end of
/// the class definition.
-void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
+void Parser::HandleMemberFunctionDeclDelays(Declarator &DeclaratorInfo,
Decl *ThisDecl) {
- DeclaratorChunk::FunctionTypeInfo &FTI
- = DeclaratorInfo.getFunctionTypeInfo();
+ DeclaratorChunk::FunctionTypeInfo &FTI = DeclaratorInfo.getFunctionTypeInfo();
// If there was a late-parsed exception-specification, we'll need a
// late parse
bool NeedLateParse = FTI.getExceptionSpecType() == EST_Unparsed;
@@ -2348,9 +2321,9 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
if (FriendLoc.isValid()) {
Diag(Tok.getLocation(), diag::err_friend_decl_spec)
- << VirtSpecifiers::getSpecifierName(Specifier)
- << FixItHint::CreateRemoval(Tok.getLocation())
- << SourceRange(FriendLoc, FriendLoc);
+ << VirtSpecifiers::getSpecifierName(Specifier)
+ << FixItHint::CreateRemoval(Tok.getLocation())
+ << SourceRange(FriendLoc, FriendLoc);
ConsumeToken();
continue;
}
@@ -2360,13 +2333,12 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
const char *PrevSpec = nullptr;
if (VS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
Diag(Tok.getLocation(), diag::err_duplicate_virt_specifier)
- << PrevSpec
- << FixItHint::CreateRemoval(Tok.getLocation());
+ << PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
if (IsInterface && (Specifier == VirtSpecifiers::VS_Final ||
Specifier == VirtSpecifiers::VS_Sealed)) {
Diag(Tok.getLocation(), diag::err_override_control_interface)
- << VirtSpecifiers::getSpecifierName(Specifier);
+ << VirtSpecifiers::getSpecifierName(Specifier);
} else if (Specifier == VirtSpecifiers::VS_Sealed) {
Diag(Tok.getLocation(), diag::ext_ms_sealed_keyword);
} else if (Specifier == VirtSpecifiers::VS_Abstract) {
@@ -2436,7 +2408,8 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
VS, getCurrentClass().IsInterface,
DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
if (!VS.isUnset())
- MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo,
+ VS);
}
// If a simple-asm-expr is present, parse it.
@@ -2470,7 +2443,8 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
if (AL.isKnownToGCC() && !AL.isCXX11Attribute())
Diag(AL.getLoc(), diag::warn_gcc_attribute_location);
- MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo,
+ VS);
}
}
@@ -2487,8 +2461,7 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
/// Look for declaration specifiers possibly occurring after C++11
/// virt-specifier-seq and diagnose them.
void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
- Declarator &D,
- VirtSpecifiers &VS) {
+ Declarator &D, VirtSpecifiers &VS) {
DeclSpec DS(AttrFactory);
// GNU-style and C++11 attributes are not allowed here, but they will be
@@ -2526,15 +2499,15 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
SourceLocation RefQualifierLoc;
if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc)) {
const char *Name = (RefQualifierIsLValueRef ? "& " : "&& ");
- FixItHint Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
+ FixItHint Insertion =
+ FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
Function.RefQualifierIsLValueRef = RefQualifierIsLValueRef;
Function.RefQualifierLoc = RefQualifierLoc;
Diag(RefQualifierLoc, diag::err_declspec_after_virtspec)
- << (RefQualifierIsLValueRef ? "&" : "&&")
- << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
- << FixItHint::CreateRemoval(RefQualifierLoc)
- << Insertion;
+ << (RefQualifierIsLValueRef ? "&" : "&&")
+ << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
+ << FixItHint::CreateRemoval(RefQualifierLoc) << Insertion;
D.SetRangeEnd(RefQualifierLoc);
}
}
@@ -2676,10 +2649,10 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Handle: member-declaration ::= '__extension__' member-declaration
if (Tok.is(tok::kw___extension__)) {
// __extension__ silences extension warnings in the subexpression.
- ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseCXXClassMemberDeclaration(AS, AccessAttrs,
- TemplateInfo, TemplateDiags);
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
+ TemplateDiags);
}
ParsedAttributes DeclAttrs(AttrFactory);
@@ -2756,9 +2729,9 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return nullptr;
MultiTemplateParamsArg TemplateParams(
- TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data()
- : nullptr,
- TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->data()
+ : nullptr,
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
if (TryConsumeToken(tok::semi)) {
if (DS.isFriendSpecified())
@@ -2769,7 +2742,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
getCurScope(), AS, DS, DeclAttrs, TemplateParams, false, AnonRecord);
DS.complete(TheDecl);
if (AnonRecord) {
- Decl* decls[] = {AnonRecord, TheDecl};
+ Decl *decls[] = {AnonRecord, TheDecl};
return Actions.BuildDeclaratorGroup(decls);
}
return Actions.ConvertDeclToDeclGroup(TheDecl);
@@ -2787,7 +2760,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation EqualLoc;
SourceLocation PureSpecLoc;
- auto TryConsumePureSpecifier = [&] (bool AllowDefinition) {
+ auto TryConsumePureSpecifier = [&](bool AllowDefinition) {
if (Tok.isNot(tok::equal))
return false;
@@ -2891,9 +2864,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DS.ClearStorageClassSpecs();
}
- Decl *FunDecl =
- ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo,
- VS, PureSpecLoc);
+ Decl *FunDecl = ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo,
+ TemplateInfo, VS, PureSpecLoc);
if (FunDecl) {
for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
@@ -2961,17 +2933,16 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SmallVector<SourceRange, 4> Ranges;
DeclaratorInfo.getCXX11AttributeRanges(Ranges);
for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end(); I != E; ++I)
+ E = Ranges.end();
+ I != E; ++I)
Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
TemplateParams);
} else {
- ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
- DeclaratorInfo,
- TemplateParams,
- BitfieldSize.get(),
- VS, HasInClassInit);
+ ThisDecl = Actions.ActOnCXXMemberDeclarator(
+ getCurScope(), AS, DeclaratorInfo, TemplateParams, BitfieldSize.get(),
+ VS, HasInClassInit);
if (VarTemplateDecl *VT =
ThisDecl ? dyn_cast<VarTemplateDecl>(ThisDecl) : nullptr)
@@ -3031,7 +3002,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
Actions.ActOnUninitializedDecl(ThisDecl);
SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
} else if (ThisDecl)
- Actions.AddInitializerToDecl(ThisDecl, Init.get(), EqualLoc.isInvalid());
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(),
+ EqualLoc.isInvalid());
} else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static)
// No initializer.
Actions.ActOnUninitializedDecl(ThisDecl);
@@ -3069,7 +3041,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// the start of a declarator. The comma was probably a typo for a
// semicolon.
Diag(CommaLoc, diag::err_expected_semi_declaration)
- << FixItHint::CreateReplacement(CommaLoc, ";");
+ << FixItHint::CreateReplacement(CommaLoc, ";");
ExpectSemi = false;
break;
}
@@ -3127,23 +3099,23 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
/// be a constant-expression.
ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc) {
- assert(Tok.isOneOf(tok::equal, tok::l_brace)
- && "Data member initializer not starting with '=' or '{'");
+ assert(Tok.isOneOf(tok::equal, tok::l_brace) &&
+ "Data member initializer not starting with '=' or '{'");
EnterExpressionEvaluationContext Context(
Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated, D);
if (TryConsumeToken(tok::equal, EqualLoc)) {
if (Tok.is(tok::kw_delete)) {
// In principle, an initializer of '= delete p;' is legal, but it will
- // never type-check. It's better to diagnose it as an ill-formed expression
- // than as an ill-formed deleted non-function member.
- // An initializer of '= delete p, foo' will never be parsed, because
- // a top-level comma always ends the initializer expression.
+ // never type-check. It's better to diagnose it as an ill-formed
+ // expression than as an ill-formed deleted non-function member. An
+ // initializer of '= delete p, foo' will never be parsed, because a
+ // top-level comma always ends the initializer expression.
const Token &Next = NextToken();
if (IsFunction || Next.isOneOf(tok::semi, tok::comma, tok::eof)) {
if (IsFunction)
Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
- << 1 /* delete */;
+ << 1 /* delete */;
else
Diag(ConsumeToken(), diag::err_deleted_non_function);
return ExprError();
@@ -3151,7 +3123,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
} else if (Tok.is(tok::kw_default)) {
if (IsFunction)
Diag(Tok, diag::err_default_delete_in_multiple_declaration)
- << 0 /* default */;
+ << 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
<< getLangOpts().CPlusPlus20;
@@ -3190,7 +3162,7 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
// within a template argument).
if (Tok.is(tok::colon)) {
// Enter the scope of the class so that we can correctly parse its bases.
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+ ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope);
ParsingClassDefinition ParsingDef(*this, TagDecl, /*NonNestedClass*/ true,
TagType == DeclSpec::TST_interface);
auto OldContext =
@@ -3318,8 +3290,8 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
default:
if (tok::isPragmaAnnotation(Tok.getKind())) {
Diag(Tok.getLocation(), diag::err_pragma_misplaced_in_decl)
- << DeclSpec::getSpecifierName(TagType,
- Actions.getASTContext().getPrintingPolicy());
+ << DeclSpec::getSpecifierName(
+ TagType, Actions.getASTContext().getPrintingPolicy());
ConsumeAnnotationToken();
return nullptr;
}
@@ -3338,9 +3310,9 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
ParsedAttributes &Attrs,
unsigned TagType, Decl *TagDecl) {
assert((TagType == DeclSpec::TST_struct ||
- TagType == DeclSpec::TST_interface ||
- TagType == DeclSpec::TST_union ||
- TagType == DeclSpec::TST_class) && "Invalid TagType!");
+ TagType == DeclSpec::TST_interface ||
+ TagType == DeclSpec::TST_union || TagType == DeclSpec::TST_class) &&
+ "Invalid TagType!");
llvm::TimeTraceScope TimeScope("ParseClass", [&]() {
if (auto *TD = dyn_cast_or_null<NamedDecl>(TagDecl))
@@ -3363,10 +3335,10 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// The Microsoft extension __interface does not permit nested classes.
if (getCurrentClass().IsInterface) {
Diag(RecordLoc, diag::err_invalid_member_in_interface)
- << /*ErrorType=*/6
- << (isa<NamedDecl>(TagDecl)
- ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
- : "(anonymous)");
+ << /*ErrorType=*/6
+ << (isa<NamedDecl>(TagDecl)
+ ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
+ : "(anonymous)");
}
break;
}
@@ -3379,7 +3351,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
}
// Enter a scope for the class.
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+ ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope);
// Note that we are parsing a new (potentially-nested) class definition.
ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass,
@@ -3492,7 +3464,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (SuggestFixIt) {
LBraceDiag << FixItHint::CreateInsertion(BraceLoc, " {");
// Try recovering from missing { after base-clause.
- PP.EnterToken(Tok, /*IsReinject*/true);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::l_brace);
} else {
if (TagDecl)
@@ -3586,10 +3558,8 @@ void Parser::DiagnoseUnexpectedNamespace(NamedDecl *D) {
// FIXME: Suggest where the close brace should have gone by looking
// at indentation changes within the definition body.
- Diag(D->getLocation(),
- diag::err_missing_end_of_definition) << D;
- Diag(Tok.getLocation(),
- diag::note_missing_end_of_definition_before) << D;
+ Diag(D->getLocation(), diag::err_missing_end_of_definition) << D;
+ Diag(Tok.getLocation(), diag::note_missing_end_of_definition_before) << D;
// Push '};' onto the token stream to recover.
PP.EnterToken(Tok, /*IsReinject*/ true);
@@ -3632,7 +3602,7 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
SourceLocation ColonLoc = ConsumeToken();
- SmallVector<CXXCtorInitializer*, 4> MemInitializers;
+ SmallVector<CXXCtorInitializer *, 4> MemInitializers;
bool AnyErrors = false;
do {
@@ -3659,12 +3629,12 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
Tok.isOneOf(tok::identifier, tok::coloncolon)) {
SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
Diag(Loc, diag::err_ctor_init_missing_comma)
- << FixItHint::CreateInsertion(Loc, ", ");
+ << FixItHint::CreateInsertion(Loc, ", ");
} else {
// Skip over garbage, until we get to '{'. Don't eat the '{'.
if (!MemInit.isInvalid())
- Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
- << tok::comma;
+ Diag(Tok.getLocation(), diag::err_expected_either)
+ << tok::l_brace << tok::comma;
SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
break;
}
@@ -3718,7 +3688,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
? takeTemplateIdAnnotation(Tok)
: nullptr;
if (TemplateId && TemplateId->mightBeType()) {
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/ true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TemplateTypeTy = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
@@ -3745,7 +3715,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
TemplateTypeTy.get(), DS, IdLoc,
InitList.get(), EllipsisLoc);
- } else if(Tok.is(tok::l_paren)) {
+ } else if (Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -3779,10 +3749,9 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
if (TemplateTypeTy.isInvalid())
return true;
- return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy.get(), DS, IdLoc,
- T.getOpenLocation(), ArgExprs,
- T.getCloseLocation(), EllipsisLoc);
+ return Actions.ActOnMemInitializer(
+ ConstructorDecl, getCurScope(), SS, II, TemplateTypeTy.get(), DS, IdLoc,
+ T.getOpenLocation(), ArgExprs, T.getCloseLocation(), EllipsisLoc);
}
if (TemplateTypeTy.isInvalid())
@@ -3803,13 +3772,11 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
/// noexcept-specification:
/// 'noexcept'
/// 'noexcept' '(' constant-expression ')'
-ExceptionSpecificationType
-Parser::tryParseExceptionSpecification(bool Delayed,
- SourceRange &SpecificationRange,
- SmallVectorImpl<ParsedType> &DynamicExceptions,
- SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
- ExprResult &NoexceptExpr,
- CachedTokens *&ExceptionSpecTokens) {
+ExceptionSpecificationType Parser::tryParseExceptionSpecification(
+ bool Delayed, SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &DynamicExceptions,
+ SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
+ ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens) {
ExceptionSpecificationType Result = EST_None;
ExceptionSpecTokens = nullptr;
@@ -3838,8 +3805,8 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// Cache the tokens for the exception-specification.
ExceptionSpecTokens = new CachedTokens;
- ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
- ExceptionSpecTokens->push_back(Tok); // '('
+ ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
+ ExceptionSpecTokens->push_back(Tok); // '('
SpecificationRange.setEnd(ConsumeParen()); // '('
ConsumeAndStoreUntil(tok::r_paren, *ExceptionSpecTokens,
@@ -3852,9 +3819,8 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// See if there's a dynamic specification.
if (Tok.is(tok::kw_throw)) {
- Result = ParseDynamicExceptionSpecification(SpecificationRange,
- DynamicExceptions,
- DynamicExceptionRanges);
+ Result = ParseDynamicExceptionSpecification(
+ SpecificationRange, DynamicExceptions, DynamicExceptionRanges);
assert(DynamicExceptions.size() == DynamicExceptionRanges.size() &&
"Produced different number of exception types and ranges.");
}
@@ -3878,8 +3844,8 @@ Parser::tryParseExceptionSpecification(bool Delayed,
NoexceptExpr = ParseConstantExpression();
T.consumeClose();
if (!NoexceptExpr.isInvalid()) {
- NoexceptExpr = Actions.ActOnNoexceptSpec(NoexceptExpr.get(),
- NoexceptType);
+ NoexceptExpr =
+ Actions.ActOnNoexceptSpec(NoexceptExpr.get(), NoexceptType);
NoexceptRange = SourceRange(KeywordLoc, T.getCloseLocation());
} else {
NoexceptType = EST_BasicNoexcept;
@@ -3908,17 +3874,16 @@ Parser::tryParseExceptionSpecification(bool Delayed,
return Result;
}
-static void diagnoseDynamicExceptionSpecification(
- Parser &P, SourceRange Range, bool IsNoexcept) {
+static void diagnoseDynamicExceptionSpecification(Parser &P, SourceRange Range,
+ bool IsNoexcept) {
if (P.getLangOpts().CPlusPlus11) {
const char *Replacement = IsNoexcept ? "noexcept" : "noexcept(false)";
- P.Diag(Range.getBegin(),
- P.getLangOpts().CPlusPlus17 && !IsNoexcept
- ? diag::ext_dynamic_exception_spec
- : diag::warn_exception_spec_deprecated)
+ P.Diag(Range.getBegin(), P.getLangOpts().CPlusPlus17 && !IsNoexcept
+ ? diag::ext_dynamic_exception_spec
+ : diag::warn_exception_spec_deprecated)
<< Range;
P.Diag(Range.getBegin(), diag::note_exception_spec_deprecated)
- << Replacement << FixItHint::CreateReplacement(Range, Replacement);
+ << Replacement << FixItHint::CreateReplacement(Range, Replacement);
}
}
@@ -3934,9 +3899,8 @@ static void diagnoseDynamicExceptionSpecification(
/// type-id-list ',' type-id ... [opt]
///
ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
- SourceRange &SpecificationRange,
- SmallVectorImpl<ParsedType> &Exceptions,
- SmallVectorImpl<SourceRange> &Ranges) {
+ SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
+ SmallVectorImpl<SourceRange> &Ranges) {
assert(Tok.is(tok::kw_throw) && "expected throw");
SpecificationRange.setBegin(ConsumeToken());
@@ -4010,10 +3974,9 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
SourceLocation RequiresKWLoc = ConsumeToken();
ExprResult TrailingRequiresClause;
- ParseScope ParamScope(this,
- Scope::DeclScope |
- Scope::FunctionDeclarationScope |
- Scope::FunctionPrototypeScope);
+ ParseScope ParamScope(this, Scope::DeclScope |
+ Scope::FunctionDeclarationScope |
+ Scope::FunctionPrototypeScope);
Actions.ActOnStartTrailingRequiresClause(getCurScope(), D);
@@ -4063,9 +4026,9 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
/// We have just started parsing the definition of a new class,
/// so push that class onto our stack of classes that is currently
/// being parsed.
-Sema::ParsingClassState
-Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass,
- bool IsInterface) {
+Sema::ParsingClassState Parser::PushParsingClass(Decl *ClassDecl,
+ bool NonNestedClass,
+ bool IsInterface) {
assert((NonNestedClass || !ClassStack.empty()) &&
"Nested class without outer class");
ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass, IsInterface));
@@ -4113,7 +4076,8 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
// This nested class has some members that will need to be processed
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
- assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
+ assert(getCurScope()->isClassScope() &&
+ "Nested class outside of class scope?");
ClassStack.top()->LateParsedDeclarations.push_back(
new LateParsedClass(this, Victim));
}
@@ -4295,13 +4259,10 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
/// '[' balanced-token-seq ']'
/// '{' balanced-token-seq '}'
/// any token but '(', ')', '[', ']', '{', or '}'
-bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- CachedTokens &OpenMPTokens) {
+bool Parser::ParseCXX11AttributeArgs(
+ IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, CachedTokens &OpenMPTokens) {
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
SourceLocation LParenLoc = Tok.getLocation();
const LangOptions &LO = getLangOpts();
@@ -4321,7 +4282,8 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
!hasAttribute(LO.CPlusPlus ? AttributeCommonInfo::Syntax::AS_CXX11
: AttributeCommonInfo::Syntax::AS_C2x,
ScopeName, AttrName, getTargetInfo(), getLangOpts())) {
- if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {}
+ if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {
+ }
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
SkipUntil(tok::r_paren);
@@ -4339,7 +4301,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
if (ScopeName && ScopeName->isStr("omp")) {
Diag(AttrNameLoc, getLangOpts().OpenMP >= 51
? diag::warn_omp51_compat_attributes
- : diag::ext_omp_attributes);
+ : diag::ext_omp_attributes);
ParseOpenMPAttributeArgs(AttrName, OpenMPTokens);
@@ -4354,9 +4316,8 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
ScopeName, ScopeLoc, Syntax);
else
- NumArgs =
- ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ NumArgs = ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
if (!Attrs.empty() &&
IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
@@ -4444,7 +4405,7 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
Diag(Tok.getLocation(), diag::err_expected) << tok::colon;
}
- llvm::SmallDenseMap<IdentifierInfo*, SourceLocation, 4> SeenAttrs;
+ llvm::SmallDenseMap<IdentifierInfo *, SourceLocation, 4> SeenAttrs;
bool AttrParsed = false;
while (!Tok.isOneOf(tok::r_square, tok::semi, tok::eof)) {
@@ -4510,8 +4471,7 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
}
if (TryConsumeToken(tok::ellipsis))
- Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
- << AttrName;
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis) << AttrName;
}
// If we hit an error and recovered by parsing up to a semicolon, eat the
@@ -4556,8 +4516,7 @@ void Parser::DiagnoseAndSkipCXX11Attributes() {
if (EndLoc.isValid()) {
SourceRange Range(StartLoc, EndLoc);
- Diag(StartLoc, diag::err_attributes_not_allowed)
- << Range;
+ Diag(StartLoc, diag::err_attributes_not_allowed) << Range;
}
}
@@ -4756,7 +4715,7 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(
case IEB_Dependent:
Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
- << Result.IsIfExists;
+ << Result.IsIfExists;
// Fall through to skip.
LLVM_FALLTHROUGH;
@@ -4768,8 +4727,7 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
// __if_exists, __if_not_exists can nest.
if (Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
- ParseMicrosoftIfExistsClassDeclaration(TagType,
- AccessAttrs, CurAS);
+ ParseMicrosoftIfExistsClassDeclaration(TagType, AccessAttrs, CurAS);
continue;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index 6ca98876b8fc..74fa70379858 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -350,6 +350,16 @@ struct PragmaMaxTokensTotalHandler : public PragmaHandler {
Token &FirstToken) override;
};
+struct PragmaRISCVHandler : public PragmaHandler {
+ PragmaRISCVHandler(Sema &Actions)
+ : PragmaHandler("riscv"), Actions(Actions) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+
+private:
+ Sema &Actions;
+};
+
void markAsReinjectedForRelexing(llvm::MutableArrayRef<clang::Token> Toks) {
for (auto &T : Toks)
T.setFlag(clang::Token::IsReinjected);
@@ -493,6 +503,11 @@ void Parser::initializePragmaHandlers() {
MaxTokensTotalPragmaHandler = std::make_unique<PragmaMaxTokensTotalHandler>();
PP.AddPragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
+
+ if (getTargetInfo().getTriple().isRISCV()) {
+ RISCVPragmaHandler = std::make_unique<PragmaRISCVHandler>(Actions);
+ PP.AddPragmaHandler("clang", RISCVPragmaHandler.get());
+ }
}
void Parser::resetPragmaHandlers() {
@@ -617,6 +632,11 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
MaxTokensTotalPragmaHandler.reset();
+
+ if (getTargetInfo().getTriple().isRISCV()) {
+ PP.RemovePragmaHandler("clang", RISCVPragmaHandler.get());
+ RISCVPragmaHandler.reset();
+ }
}
/// Handle the annotation token produced for #pragma unused(...)
@@ -3929,3 +3949,35 @@ void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
PP.overrideMaxTokens(MaxTokens, Loc);
}
+
+// Handle '#pragma clang riscv intrinsic vector'.
+void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &FirstToken) {
+ Token Tok;
+ PP.Lex(Tok);
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (!II || !II->isStr("intrinsic")) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true << "'intrinsic'";
+ return;
+ }
+
+ PP.Lex(Tok);
+ II = Tok.getIdentifierInfo();
+ if (!II || !II->isStr("vector")) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true << "'vector'";
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang riscv intrinsic";
+ return;
+ }
+
+ Actions.DeclareRISCVVBuiltins = true;
+}
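To make the new handler concrete, here is a minimal usage sketch that is not part of this diff. It assumes a RISC-V target with the vector extension enabled; the builtin alias name comes from the SemaRISCVVectorLookup.cpp changes further down, and the type spelling below is an assumption (Clang's builtin __rvv_int32m1_t, which the usual vint32m1_t header typedef refers to).

#include <stddef.h>
#pragma clang riscv intrinsic vector

// After the pragma, names such as vadd_vv_i32m1 are declared lazily during
// name lookup instead of being pulled in from <riscv_vector.h>.
__rvv_int32m1_t add_i32m1(__rvv_int32m1_t a, __rvv_int32m1_t b, size_t vl) {
  return vadd_vv_i32m1(a, b, vl); // resolved via the alias to __builtin_rvv_vadd
}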
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index ab8748c2c63d..fd044660845b 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -153,7 +153,7 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
return true;
}
-bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
+bool Parser::ExpectAndConsumeSemi(unsigned DiagID, StringRef TokenUsed) {
if (TryConsumeToken(tok::semi))
return false;
@@ -172,7 +172,7 @@ bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
return false;
}
- return ExpectAndConsume(tok::semi, DiagID);
+ return ExpectAndConsume(tok::semi, DiagID, TokenUsed);
}
void Parser::ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST TST) {
diff --git a/contrib/llvm-project/clang/lib/Sema/Scope.cpp b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
index 499279a2659d..98260226dfd3 100644
--- a/contrib/llvm-project/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
@@ -91,7 +91,7 @@ void Scope::Init(Scope *parent, unsigned flags) {
UsingDirectives.clear();
Entity = nullptr;
ErrorTrap.reset();
- NRVO.setPointerAndInt(nullptr, false);
+ NRVO = None;
}
bool Scope::containedInPrototypeScope() const {
@@ -118,19 +118,71 @@ void Scope::AddFlags(unsigned FlagsToSet) {
Flags |= FlagsToSet;
}
-void Scope::mergeNRVOIntoParent() {
- if (VarDecl *Candidate = NRVO.getPointer()) {
- if (isDeclScope(Candidate))
- Candidate->setNRVOVariable(true);
+// The algorithm for updating the NRVO candidate is as follows:
+// 1. All previous candidates become invalid because a new NRVO candidate is
+// obtained. Therefore, we need to clear return slots for other
+// variables defined before the current return statement in the current
+// scope and in outer scopes.
+// 2. Store the new candidate if its return slot is available. Otherwise,
+// there is no NRVO candidate so far.
+void Scope::updateNRVOCandidate(VarDecl *VD) {
+ auto UpdateReturnSlotsInScopeForVD = [VD](Scope *S) -> bool {
+ bool IsReturnSlotFound = S->ReturnSlots.contains(VD);
+
+ // We found a candidate variable that can be put into a return slot.
+ // Clear the set, because other variables cannot occupy a return
+ // slot in the same scope.
+ S->ReturnSlots.clear();
+
+ if (IsReturnSlotFound)
+ S->ReturnSlots.insert(VD);
+
+ return IsReturnSlotFound;
+ };
+
+ bool CanBePutInReturnSlot = false;
+
+ for (auto *S = this; S; S = S->getParent()) {
+ CanBePutInReturnSlot |= UpdateReturnSlotsInScopeForVD(S);
+
+ if (S->getEntity())
+ break;
}
- if (getEntity())
+ // Consider the variable as an NRVO candidate if a return slot is available
+ // for it in the current scope, or could become available in an outer scope.
+ NRVO = CanBePutInReturnSlot ? VD : nullptr;
+}
+
+void Scope::applyNRVO() {
+ // There is no NRVO candidate in the current scope.
+ if (!NRVO.hasValue())
return;
- if (NRVO.getInt())
- getParent()->setNoNRVO();
- else if (NRVO.getPointer())
- getParent()->addNRVOCandidate(NRVO.getPointer());
+ if (*NRVO && isDeclScope(*NRVO))
+ NRVO.getValue()->setNRVOVariable(true);
+
+ // It's necessary to propagate the NRVO candidate to the parent scope for
+ // cases where the parent scope doesn't contain a return statement.
+ // For example:
+ // X foo(bool b) {
+ // X x;
+ // if (b)
+ // return x;
+ // exit(0);
+ // }
+ // Also, we need to propagate a nullptr value, which means that NRVO is
+ // not allowed in this scope.
+ // For example:
+ // X foo(bool b) {
+ // X x;
+ // if (b)
+ // return x;
+ // else
+ // return X(); // NRVO is not allowed
+ // }
+ if (!getEntity())
+ getParent()->NRVO = *NRVO;
}
LLVM_DUMP_METHOD void Scope::dump() const { dumpImpl(llvm::errs()); }
@@ -193,8 +245,10 @@ void Scope::dumpImpl(raw_ostream &OS) const {
if (const DeclContext *DC = getEntity())
OS << "Entity : (clang::DeclContext*)" << DC << '\n';
- if (NRVO.getInt())
- OS << "NRVO not allowed\n";
- else if (NRVO.getPointer())
- OS << "NRVO candidate : (clang::VarDecl*)" << NRVO.getPointer() << '\n';
+ if (!NRVO)
+ OS << "there is no NRVO candidate\n";
+ else if (*NRVO)
+ OS << "NRVO candidate : (clang::VarDecl*)" << *NRVO << '\n';
+ else
+ OS << "NRVO is not allowed\n";
}
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 326010d4d93f..08957ce9fada 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -37,6 +37,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
@@ -137,9 +138,9 @@ public:
void reset() { S = nullptr; }
- virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override {
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override {
if (!S)
return;
switch (Reason) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 8d2fc5331a0d..985005d0b79b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -2092,7 +2092,7 @@ static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
}
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
- S->mergeNRVOIntoParent();
+ S->applyNRVO();
if (S->decl_empty()) return;
assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
@@ -18899,14 +18899,24 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
const llvm::APSInt &InitVal = ECD->getInitVal();
// Keep track of the size of positive and negative values.
- if (InitVal.isUnsigned() || InitVal.isNonNegative())
- NumPositiveBits = std::max(NumPositiveBits,
- (unsigned)InitVal.getActiveBits());
- else
+ if (InitVal.isUnsigned() || InitVal.isNonNegative()) {
+ // If the enumerator is zero, it should still be counted as one positive
+ // bit, since we need a bit to store the value zero.
+ unsigned ActiveBits = InitVal.getActiveBits();
+ NumPositiveBits = std::max({NumPositiveBits, ActiveBits, 1u});
+ } else {
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
+ }
}
+ // If we have an empty set of enumerators, we still need one bit.
+ // From [dcl.enum]p8
+ // If the enumerator-list is empty, the values of the enumeration are as if
+ // the enumeration had a single enumerator with value 0
+ if (!NumPositiveBits && !NumNegativeBits)
+ NumPositiveBits = 1;
+
// Figure out the type that should be used for this enum.
QualType BestType;
unsigned BestWidth;
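As an aside for readers of this hunk, the two adjustments above are easiest to see on tiny enums; the sketch below only restates the comments, it does not claim behavior beyond them.

enum OnlyZero { Z = 0 }; // ActiveBits is 0 for the value 0, but one positive
                         // bit is still required to represent it
enum Empty {};           // no enumerators: treated as if it had a single
                         // enumerator with value 0, so NumPositiveBits is 1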
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index cd5cdbde7f3f..0f79978b0911 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -13824,7 +13824,8 @@ static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr,
// C99 6.5.16.1
QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
SourceLocation Loc,
- QualType CompoundType) {
+ QualType CompoundType,
+ BinaryOperatorKind Opc) {
assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject));
// Verify that LHS is a modifiable lvalue, and emit error if not.
@@ -13937,10 +13938,18 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// expression or an unevaluated operand
ExprEvalContexts.back().VolatileAssignmentLHSs.push_back(LHSExpr);
} else {
- // C++2a [expr.ass]p6:
+ // C++20 [expr.ass]p6:
// [Compound-assignment] expressions are deprecated if E1 has
- // volatile-qualified type
- Diag(Loc, diag::warn_deprecated_compound_assign_volatile) << LHSType;
+ // volatile-qualified type and op is not one of the bitwise
+ // operators |, &, ^.
+ switch (Opc) {
+ case BO_OrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ break;
+ default:
+ Diag(Loc, diag::warn_deprecated_compound_assign_volatile) << LHSType;
+ }
}
}
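A hedged illustration of the operator filter above, in C++20 or later where the deprecation applies:

volatile int v = 0;
v += 1;  // still diagnosed: warn_deprecated_compound_assign_volatile
v |= 1;  // no longer diagnosed: |=, &= and ^= are exempt per [expr.ass]p6
v &= ~2; // no longer diagnosed
v ^= 4;  // no longer diagnosed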
@@ -14879,7 +14888,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
switch (Opc) {
case BO_Assign:
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType(), Opc);
if (getLangOpts().CPlusPlus &&
LHS.get()->getObjectKind() != OK_ObjCProperty) {
VK = LHS.get()->getValueKind();
@@ -14976,32 +14985,37 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
Opc == BO_DivAssign);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_RemAssign:
CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_AddAssign:
ConvertHalfVec = true;
CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_SubAssign:
ConvertHalfVec = true;
CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_ShlAssign:
case BO_ShrAssign:
CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_AndAssign:
case BO_OrAssign: // fallthrough
@@ -15011,7 +15025,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_Comma:
ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 242e1f81d75c..68158ec977cf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -29,6 +29,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
@@ -928,6 +929,14 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
+ if (DeclareRISCVVBuiltins) {
+ if (!RVIntrinsicManager)
+ RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+
+ if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
+ return true;
+ }
+
// If this is a builtin on this (or all) targets, create the decl.
if (unsigned BuiltinID = II->getBuiltinID()) {
// In C++, C2x, and OpenCL (spec v1.2 s6.9.f), we don't have any
@@ -3838,6 +3847,12 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
// associated classes are visible within their respective
// namespaces even if they are not visible during an ordinary
// lookup (11.4).
+ //
+ // C++20 [basic.lookup.argdep] p4.3
+ // -- are exported, are attached to a named module M, do not appear
+ // in the translation unit containing the point of the lookup, and
+ // have the same innermost enclosing non-inline namespace scope as
+ // a declaration of an associated entity attached to M.
DeclContext::lookup_result R = NS->lookup(Name);
for (auto *D : R) {
auto *Underlying = D;
@@ -3858,6 +3873,36 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
if (isVisible(D)) {
Visible = true;
break;
+ } else if (getLangOpts().CPlusPlusModules &&
+ D->isInExportDeclContext()) {
+ // C++20 [basic.lookup.argdep] p4.3 .. are exported ...
+ Module *FM = D->getOwningModule();
+ // exports are only valid in module purview and outside of any
+ // PMF (although a PMF should not even be present in a module
+ // with an import).
+ assert(FM && FM->isModulePurview() && !FM->isPrivateModule() &&
+ "bad export context");
+ // .. are attached to a named module M, do not appear in the
+ // translation unit containing the point of the lookup..
+ if (!isModuleUnitOfCurrentTU(FM) &&
+ llvm::any_of(AssociatedClasses, [&](auto *E) {
+ // ... and have the same innermost enclosing non-inline
+ // namespace scope as a declaration of an associated entity
+ // attached to M
+ if (!E->hasOwningModule() ||
+ E->getOwningModule()->getTopLevelModuleName() !=
+ FM->getTopLevelModuleName())
+ return false;
+ // TODO: maybe this could be cached when generating the
+ // associated namespaces / entities.
+ DeclContext *Ctx = E->getDeclContext();
+ while (!Ctx->isFileContext() || Ctx->isInlineNamespace())
+ Ctx = Ctx->getParent();
+ return Ctx == NS;
+ })) {
+ Visible = true;
+ break;
+ }
}
} else if (D->getFriendObjectKind()) {
auto *RD = cast<CXXRecordDecl>(D->getLexicalDeclContext());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index c226ed625479..d72cc33ed0f5 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -63,8 +63,9 @@ static ExprResult CreateFunctionRefExpr(
// being used.
if (FoundDecl != Fn && S.DiagnoseUseOfDecl(Fn, Loc))
return ExprError();
- DeclRefExpr *DRE = new (S.Context)
- DeclRefExpr(S.Context, Fn, false, Fn->getType(), VK_LValue, Loc, LocInfo);
+ DeclRefExpr *DRE =
+ DeclRefExpr::Create(S.Context, Fn->getQualifierLoc(), SourceLocation(),
+ Fn, false, Loc, Fn->getType(), VK_LValue, FoundDecl);
if (HadMultipleCandidates)
DRE->setHadMultipleCandidates(true);
@@ -6400,6 +6401,27 @@ void Sema::AddOverloadCandidate(
return;
}
+ // Functions with internal linkage are only viable in the same module unit.
+ if (auto *MF = Function->getOwningModule()) {
+ if (getLangOpts().CPlusPlusModules && !MF->isModuleMapModule() &&
+ !isModuleUnitOfCurrentTU(MF)) {
+ /// FIXME: Currently, the semantics of linkage in clang are slightly
+ /// different from the semantics in the C++ spec. In the C++ spec, only
+ /// names have linkage, so all entities with the same name should share
+ /// one linkage. But in clang, different entities with the same name can
+ /// have different linkages.
+ NamedDecl *ND = Function;
+ if (auto *SpecInfo = Function->getTemplateSpecializationInfo())
+ ND = SpecInfo->getTemplate();
+
+ if (ND->getFormalLinkage() == Linkage::InternalLinkage) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_module_mismatched;
+ return;
+ }
+ }
+ }
+
if (Function->isMultiVersion() && Function->hasAttr<TargetAttr>() &&
!Function->getAttr<TargetAttr>()->isDefaultVersion()) {
Candidate.Viable = false;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
new file mode 100644
index 000000000000..50fd841c231b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -0,0 +1,395 @@
+//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements name lookup for RISC-V vector intrinsic.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/SmallVector.h"
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::RISCV;
+
+namespace {
+
+// Function definition of an RVV intrinsic.
+struct RVVIntrinsicDef {
+ /// Full function name with suffix, e.g. vadd_vv_i32m1.
+ std::string Name;
+
+ /// Overloaded function name, e.g. vadd.
+ std::string OverloadName;
+
+ /// The clang builtin function this maps to, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName;
+
+ /// Function signature; the first element is the return type.
+ RVVTypes Signature;
+};
+
+struct RVVOverloadIntrinsicDef {
+ // Indexes into RISCVIntrinsicManagerImpl::IntrinsicList.
+ SmallVector<size_t, 8> Indexes;
+};
+
+} // namespace
+
+static const PrototypeDescriptor RVVSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Get subsequence of signature table.
+static ArrayRef<PrototypeDescriptor> ProtoSeq2ArrayRef(uint16_t Index,
+ uint8_t Length) {
+ return makeArrayRef(&RVVSignatureTable[Index], Length);
+}
+
+static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
+ QualType QT;
+ switch (Type->getScalarType()) {
+ case ScalarTypeKind::Void:
+ QT = Context.VoidTy;
+ break;
+ case ScalarTypeKind::Size_t:
+ QT = Context.getSizeType();
+ break;
+ case ScalarTypeKind::Ptrdiff_t:
+ QT = Context.getPointerDiffType();
+ break;
+ case ScalarTypeKind::UnsignedLong:
+ QT = Context.UnsignedLongTy;
+ break;
+ case ScalarTypeKind::SignedLong:
+ QT = Context.LongTy;
+ break;
+ case ScalarTypeKind::Boolean:
+ QT = Context.BoolTy;
+ break;
+ case ScalarTypeKind::SignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
+ break;
+ case ScalarTypeKind::Float:
+ switch (Type->getElementBitwidth()) {
+ case 64:
+ QT = Context.DoubleTy;
+ break;
+ case 32:
+ QT = Context.FloatTy;
+ break;
+ case 16:
+ QT = Context.Float16Ty;
+ break;
+ default:
+ llvm_unreachable("Unsupported floating point width.");
+ }
+ break;
+ case Invalid:
+ llvm_unreachable("Unhandled type.");
+ }
+ if (Type->isVector())
+ QT = Context.getScalableVectorType(QT, Type->getScale().getValue());
+
+ if (Type->isConstant())
+ QT = Context.getConstType(QT);
+
+ // Transform the type to a pointer as the last step, if necessary.
+ if (Type->isPointer())
+ QT = Context.getPointerType(QT);
+
+ return QT;
+}
+
+namespace {
+class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
+private:
+ Sema &S;
+ ASTContext &Context;
+
+ // List of all RVV intrinsics.
+ std::vector<RVVIntrinsicDef> IntrinsicList;
+ // Mapping function name to index of IntrinsicList.
+ StringMap<size_t> Intrinsics;
+ // Mapping function name to RVVOverloadIntrinsicDef.
+ StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
+
+ // Create IntrinsicList
+ void InitIntrinsicList();
+
+ // Create RVVIntrinsicDef.
+ void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMask,
+ RVVTypes &Types);
+
+ // Create FunctionDecl for a vector intrinsic.
+ void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP, unsigned Index,
+ bool IsOverload);
+
+public:
+ RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
+ InitIntrinsicList();
+ }
+
+ // If the name is a RISC-V vector intrinsic, create its declaration, insert
+ // it into the symbol table, and return true; otherwise return false.
+ bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) override;
+};
+} // namespace
+
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+ const TargetInfo &TI = Context.getTargetInfo();
+ bool HasVectorFloat32 = TI.hasFeature("zve32f");
+ bool HasVectorFloat64 = TI.hasFeature("zve64d");
+ bool HasZvfh = TI.hasFeature("experimental-zvfh");
+ bool HasRV64 = TI.hasFeature("64bit");
+ bool HasFullMultiply = TI.hasFeature("v");
+
+ // Construction of RVVIntrinsicRecords needs to stay in sync with
+ // createRVVIntrinsics in RISCVVEmitter.cpp.
+ for (auto &Record : RVVIntrinsicRecords) {
+ // Create Intrinsics for each type and LMUL.
+ BasicType BaseType = BasicType::Unknown;
+ ArrayRef<PrototypeDescriptor> BasicProtoSeq =
+ ProtoSeq2ArrayRef(Record.PrototypeIndex, Record.PrototypeLength);
+ ArrayRef<PrototypeDescriptor> SuffixProto =
+ ProtoSeq2ArrayRef(Record.SuffixIndex, Record.SuffixLength);
+ ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
+ Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
+ RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false,
+ Record.HasVL, Record.NF);
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq =
+ RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/true,
+ Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF);
+
+ for (unsigned int TypeRangeMaskShift = 0;
+ TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
+ ++TypeRangeMaskShift) {
+ unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
+ BaseType = static_cast<BasicType>(BaseTypeI);
+
+ if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
+ continue;
+
+ // Check requirements.
+ if (BaseType == BasicType::Float16 && !HasZvfh)
+ continue;
+
+ if (BaseType == BasicType::Float32 && !HasVectorFloat32)
+ continue;
+
+ if (BaseType == BasicType::Float64 && !HasVectorFloat64)
+ continue;
+
+ if (((Record.RequiredExtensions & RVV_REQ_RV64) == RVV_REQ_RV64) &&
+ !HasRV64)
+ continue;
+
+ if ((BaseType == BasicType::Int64) &&
+ ((Record.RequiredExtensions & RVV_REQ_FullMultiply) ==
+ RVV_REQ_FullMultiply) &&
+ !HasFullMultiply)
+ continue;
+
+ // Expand with each supported LMUL.
+ for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
+ if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
+ continue;
+
+ Optional<RVVTypes> Types =
+ RVVType::computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
+
+ // Skip creating a new intrinsic if any of the types are illegal.
+ if (!Types.hasValue())
+ continue;
+
+ std::string SuffixStr =
+ RVVIntrinsic::getSuffixStr(BaseType, Log2LMUL, SuffixProto);
+ std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+ BaseType, Log2LMUL, OverloadedSuffixProto);
+
+ // Create non-masked intrinsic.
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types);
+
+ if (Record.HasMasked) {
+ // Create masked intrinsic.
+ Optional<RVVTypes> MaskTypes = RVVType::computeTypes(
+ BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
+
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
+ *MaskTypes);
+ }
+ }
+ }
+ }
+}
+
+// Compute the name and signature for an intrinsic with concrete types.
+void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
+ const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMask, RVVTypes &Signature) {
+ // Function name, e.g. vadd_vv_i32m1.
+ std::string Name = Record.Name;
+ if (!SuffixStr.empty())
+ Name += "_" + SuffixStr.str();
+
+ if (IsMask)
+ Name += "_m";
+
+ // Overloaded function name, e.g. vadd.
+ std::string OverloadedName;
+ if (!Record.OverloadedName)
+ OverloadedName = StringRef(Record.Name).split("_").first.str();
+ else
+ OverloadedName = Record.OverloadedName;
+ if (!OverloadedSuffixStr.empty())
+ OverloadedName += "_" + OverloadedSuffixStr.str();
+
+ // clang built-in function name, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
+ if (IsMask)
+ BuiltinName += "_m";
+
+ // Put into IntrinsicList.
+ size_t Index = IntrinsicList.size();
+ IntrinsicList.push_back({Name, OverloadedName, BuiltinName, Signature});
+
+ // Create the mapping in Intrinsics.
+ Intrinsics.insert({Name, Index});
+
+ // Get the RVVOverloadIntrinsicDef.
+ RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
+ OverloadIntrinsics[OverloadedName];
+
+ // And add the index.
+ OverloadIntrinsicDef.Indexes.push_back(Index);
+}
+
+void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP,
+ unsigned Index,
+ bool IsOverload) {
+ ASTContext &Context = S.Context;
+ RVVIntrinsicDef &IDef = IntrinsicList[Index];
+ RVVTypes Sigs = IDef.Signature;
+ size_t SigLength = Sigs.size();
+ RVVType *ReturnType = Sigs[0];
+ QualType RetType = RVVType2Qual(Context, ReturnType);
+ SmallVector<QualType, 8> ArgTypes;
+ QualType BuiltinFuncType;
+
+ // Skip return type, and convert RVVType to QualType for arguments.
+ for (size_t i = 1; i < SigLength; ++i)
+ ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
+
+ FunctionProtoType::ExtProtoInfo PI(
+ Context.getDefaultCallingConvention(false, false, true));
+
+ PI.Variadic = false;
+
+ SourceLocation Loc = LR.getNameLoc();
+ BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+
+ FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
+ Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
+ SC_Extern, S.getCurFPFeatures().isFPConstrained(),
+ /*isInlineSpecified*/ false,
+ /*hasWrittenPrototype*/ true);
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
+ SmallVector<ParmVarDecl *, 8> ParmList;
+ for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
+ FP->getParamType(IParm), nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, IParm);
+ ParmList.push_back(Parm);
+ }
+ RVVIntrinsicDecl->setParams(ParmList);
+
+ // Add function attributes.
+ if (IsOverload)
+ RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+ // Set up the alias to __builtin_rvv_*.
+ IdentifierInfo &IntrinsicII = PP.getIdentifierTable().get(IDef.BuiltinName);
+ RVVIntrinsicDecl->addAttr(
+ BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
+
+ // Add to symbol table.
+ LR.addDecl(RVVIntrinsicDecl);
+}
+
+bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP) {
+ StringRef Name = II->getName();
+
+ // Lookup the function name from the overload intrinsics first.
+ auto OvIItr = OverloadIntrinsics.find(Name);
+ if (OvIItr != OverloadIntrinsics.end()) {
+ const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
+ for (auto Index : OvIntrinsicDef.Indexes)
+ CreateRVVIntrinsicDecl(LR, II, PP, Index,
+ /*IsOverload*/ true);
+
+ // If we added overloads, we need to resolve the lookup result.
+ LR.resolveKind();
+ return true;
+ }
+
+ // Lookup the function name from the intrinsics.
+ auto Itr = Intrinsics.find(Name);
+ if (Itr != Intrinsics.end()) {
+ CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
+ /*IsOverload*/ false);
+ return true;
+ }
+
+ // It's not an RVV intrinsic.
+ return false;
+}
+
+namespace clang {
+std::unique_ptr<clang::sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S) {
+ return std::make_unique<RISCVIntrinsicManagerImpl>(S);
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index f25694ce48c9..c6ca10c0342c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -3898,12 +3898,10 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
if (R.isInvalid() || ExprEvalContexts.back().isDiscardedStatementContext())
return R;
- if (VarDecl *VD =
- const_cast<VarDecl*>(cast<ReturnStmt>(R.get())->getNRVOCandidate())) {
- CurScope->addNRVOCandidate(VD);
- } else {
- CurScope->setNoNRVO();
- }
+ VarDecl *VD =
+ const_cast<VarDecl *>(cast<ReturnStmt>(R.get())->getNRVOCandidate());
+
+ CurScope->updateNRVOCandidate(VD);
CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 95c83ebfaeab..1542a07713fb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/TemplateName.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/Stack.h"
@@ -8707,23 +8708,59 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
// Check for conflicting previous declaration.
DeclarationNameInfo NameInfo(NewDecl->getDeclName(), NameLoc);
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ forRedeclarationInCurContext());
LookupName(Previous, S);
-
FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage=*/false,
/*AllowInlineNamespace*/false);
- if (!Previous.empty()) {
- auto *Old = Previous.getRepresentativeDecl();
- Diag(NameLoc, isa<ConceptDecl>(Old) ? diag::err_redefinition :
- diag::err_redefinition_different_kind) << NewDecl->getDeclName();
- Diag(Old->getLocation(), diag::note_previous_definition);
- }
+ bool AddToScope = true;
+ CheckConceptRedefinition(NewDecl, Previous, AddToScope);
ActOnDocumentableDecl(NewDecl);
- PushOnScopeChains(NewDecl, S);
+ if (AddToScope)
+ PushOnScopeChains(NewDecl, S);
return NewDecl;
}
+void Sema::CheckConceptRedefinition(ConceptDecl *NewDecl,
+ LookupResult &Previous, bool &AddToScope) {
+ AddToScope = true;
+
+ if (Previous.empty())
+ return;
+
+ auto *OldConcept = dyn_cast<ConceptDecl>(Previous.getRepresentativeDecl());
+ if (!OldConcept) {
+ auto *Old = Previous.getRepresentativeDecl();
+ Diag(NewDecl->getLocation(), diag::err_redefinition_different_kind)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(Old, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ // Check if we can merge with a concept declaration.
+ bool IsSame = Context.isSameEntity(NewDecl, OldConcept);
+ if (!IsSame) {
+ Diag(NewDecl->getLocation(), diag::err_redefinition_different_concept)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(OldConcept, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ if (hasReachableDefinition(OldConcept)) {
+ Diag(NewDecl->getLocation(), diag::err_redefinition)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(OldConcept, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ if (!Previous.isSingleResult()) {
+ // FIXME: we should produce an error in case of ambiguous and failed lookups.
+ // Other decls (e.g. namespaces) also have this shortcoming.
+ return;
+ }
+ Context.setPrimaryMergedDecl(NewDecl, OldConcept);
+}
+
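A minimal sketch of the diagnostics this helper selects between, in a single translation unit where every earlier definition is reachable; the case the change is really for, merging identical concepts from different module units via setPrimaryMergedDecl, is not shown here.

template <class T> concept C = true;
template <class T> concept C = true;           // err_redefinition
template <class T> concept D = true;
template <class T> concept D = sizeof(T) == 1; // err_redefinition_different_concept
struct E {};
template <class T> concept E = true;           // err_redefinition_different_kind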
/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index d70e824224df..73800191dfc1 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2926,7 +2926,8 @@ Attr *ASTRecordReader::readAttr() {
/// Reads attributes from the current stream position.
void ASTRecordReader::readAttributes(AttrVec &Attrs) {
for (unsigned I = 0, E = readInt(); I != E; ++I)
- Attrs.push_back(readAttr());
+ if (auto *A = readAttr())
+ Attrs.push_back(A);
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index fac8fc141d2c..0739dcc1ce60 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -4347,8 +4347,12 @@ void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
void ASTRecordWriter::AddAttr(const Attr *A) {
auto &Record = *this;
- if (!A)
+ // FIXME: Clang can't handle the serialization/deserialization of
+ // preferred_name properly yet. See
+ // https://github.com/llvm/llvm-project/issues/56490 for an example.
+ if (!A || (isa<PreferredNameAttr>(A) && Writer->isWritingNamedModules()))
return Record.push_back(0);
+
Record.push_back(A->getKind() + 1); // FIXME: stable encoding, target attrs
Record.AddIdentifierRef(A->getAttrName());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 987cf65d6fec..9a6c013bcf66 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -26,9 +26,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <functional>
using namespace clang;
using namespace ento;
+using namespace std::placeholders;
namespace {
struct AnyArgExpr {
@@ -118,10 +120,14 @@ public:
const LocationContext *LCtx,
const CallEvent *Call) const;
- typedef void (CStringChecker::*FnCheck)(CheckerContext &,
- const CallExpr *) const;
+ using FnCheck = std::function<void(const CStringChecker *, CheckerContext &,
+ const CallExpr *)>;
+
CallDescriptionMap<FnCheck> Callbacks = {
- {{CDF_MaybeBuiltin, "memcpy", 3}, &CStringChecker::evalMemcpy},
+ {{CDF_MaybeBuiltin, "memcpy", 3},
+ std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, false)},
+ {{CDF_MaybeBuiltin, "wmemcpy", 3},
+ std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, true)},
{{CDF_MaybeBuiltin, "mempcpy", 3}, &CStringChecker::evalMempcpy},
{{CDF_MaybeBuiltin, "memcmp", 3}, &CStringChecker::evalMemcmp},
{{CDF_MaybeBuiltin, "memmove", 3}, &CStringChecker::evalMemmove},
@@ -135,7 +141,9 @@ public:
{{CDF_MaybeBuiltin, "strncat", 3}, &CStringChecker::evalStrncat},
{{CDF_MaybeBuiltin, "strlcat", 3}, &CStringChecker::evalStrlcat},
{{CDF_MaybeBuiltin, "strlen", 1}, &CStringChecker::evalstrLength},
+ {{CDF_MaybeBuiltin, "wcslen", 1}, &CStringChecker::evalstrLength},
{{CDF_MaybeBuiltin, "strnlen", 2}, &CStringChecker::evalstrnLength},
+ {{CDF_MaybeBuiltin, "wcsnlen", 2}, &CStringChecker::evalstrnLength},
{{CDF_MaybeBuiltin, "strcmp", 2}, &CStringChecker::evalStrcmp},
{{CDF_MaybeBuiltin, "strncmp", 3}, &CStringChecker::evalStrncmp},
{{CDF_MaybeBuiltin, "strcasecmp", 2}, &CStringChecker::evalStrcasecmp},
@@ -152,14 +160,14 @@ public:
StdCopyBackward{{"std", "copy_backward"}, 3};
FnCheck identifyCall(const CallEvent &Call, CheckerContext &C) const;
- void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalMemcpy(CheckerContext &C, const CallExpr *CE, bool IsWide) const;
void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
ProgramStateRef state, SizeArgExpr Size,
DestinationArgExpr Dest, SourceArgExpr Source,
- bool Restricted, bool IsMempcpy) const;
+ bool Restricted, bool IsMempcpy, bool IsWide) const;
void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
@@ -240,13 +248,14 @@ public:
AnyArgExpr Arg, SVal l) const;
ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
- AccessKind Access) const;
+ AccessKind Access, bool IsWide = false) const;
ProgramStateRef CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
AnyArgExpr Buffer, SizeArgExpr Size,
- AccessKind Access) const;
+ AccessKind Access,
+ bool IsWide = false) const;
ProgramStateRef CheckOverlap(CheckerContext &C, ProgramStateRef state,
SizeArgExpr Size, AnyArgExpr First,
- AnyArgExpr Second) const;
+ AnyArgExpr Second, bool IsWide = false) const;
void emitOverlapBug(CheckerContext &C,
ProgramStateRef state,
const Stmt *First,
@@ -329,7 +338,8 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
- AccessKind Access) const {
+ AccessKind Access,
+ bool IsWide) const {
// If a previous check has failed, propagate the failure.
if (!state)
@@ -344,17 +354,36 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
if (!ER)
return state;
- if (ER->getValueType() != C.getASTContext().CharTy)
- return state;
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ // Get the index of the accessed element.
+ NonLoc Idx = ER->getIndex();
+
+ if (!IsWide) {
+ if (ER->getValueType() != Ctx.CharTy)
+ return state;
+ } else {
+ if (ER->getValueType() != Ctx.WideCharTy)
+ return state;
+
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc WideSize =
+ svalBuilder
+ .makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
+ SizeTy)
+ .castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(state, BO_Mul, Idx, WideSize, SizeTy);
+ if (Offset.isUnknown())
+ return state;
+ Idx = Offset.castAs<NonLoc>();
+ }
// Get the size of the array.
const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
DefinedOrUnknownSVal Size =
getDynamicExtent(state, superReg, C.getSValBuilder());
- // Get the index of the accessed element.
- DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
-
ProgramStateRef StInBound, StOutBound;
std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, Size);
if (StOutBound && !StInBound) {
@@ -385,11 +414,10 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return StInBound;
}
-ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
- ProgramStateRef State,
- AnyArgExpr Buffer,
- SizeArgExpr Size,
- AccessKind Access) const {
+ProgramStateRef
+CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Buffer, SizeArgExpr Size,
+ AccessKind Access, bool IsWide) const {
// If a previous check has failed, propagate the failure.
if (!State)
return nullptr;
@@ -398,7 +426,7 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
ASTContext &Ctx = svalBuilder.getContext();
QualType SizeTy = Size.Expression->getType();
- QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+ QualType PtrTy = Ctx.getPointerType(IsWide ? Ctx.WideCharTy : Ctx.CharTy);
// Check that the first buffer is non-null.
SVal BufVal = C.getSVal(Buffer.Expression);
@@ -432,7 +460,7 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
SVal BufEnd =
svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
- State = CheckLocation(C, State, Buffer, BufEnd, Access);
+ State = CheckLocation(C, State, Buffer, BufEnd, Access, IsWide);
// If the buffer isn't large enough, abort.
if (!State)
@@ -446,7 +474,8 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
ProgramStateRef state,
SizeArgExpr Size, AnyArgExpr First,
- AnyArgExpr Second) const {
+ AnyArgExpr Second,
+ bool IsWide) const {
if (!Filter.CheckCStringBufferOverlap)
return state;
@@ -525,7 +554,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Convert the first buffer's start address to char*.
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
- QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ QualType CharPtrTy = Ctx.getPointerType(IsWide ? Ctx.WideCharTy : Ctx.CharTy);
SVal FirstStart =
svalBuilder.evalCast(*firstLoc, CharPtrTy, First.Expression->getType());
Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
@@ -1161,7 +1190,7 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
ProgramStateRef state, SizeArgExpr Size,
DestinationArgExpr Dest,
SourceArgExpr Source, bool Restricted,
- bool IsMempcpy) const {
+ bool IsMempcpy, bool IsWide) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
@@ -1204,11 +1233,11 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
return;
// Ensure the accesses are valid and that the buffers do not overlap.
- state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write);
- state = CheckBufferAccess(C, state, Source, Size, AccessKind::read);
+ state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write, IsWide);
+ state = CheckBufferAccess(C, state, Source, Size, AccessKind::read, IsWide);
if (Restricted)
- state = CheckOverlap(C, state, Size, Dest, Source);
+ state = CheckOverlap(C, state, Size, Dest, Source, IsWide);
if (!state)
return;
@@ -1258,7 +1287,8 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
}
}
-void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE,
+ bool IsWide) const {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
DestinationArgExpr Dest = {CE->getArg(0), 0};
@@ -1269,7 +1299,8 @@ void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
constexpr bool IsRestricted = true;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, State, Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, CE, State, Size, Dest, Src, IsRestricted, IsMempcpy,
+ IsWide);
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
@@ -1281,7 +1312,8 @@ void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
constexpr bool IsRestricted = true;
constexpr bool IsMempcpy = true;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy,
+ false);
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
@@ -1293,7 +1325,8 @@ void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy,
+ false);
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
@@ -1304,7 +1337,8 @@ void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy,
+ false);
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
@@ -2336,7 +2370,7 @@ bool CStringChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
// Check and evaluate the call.
const auto *CE = cast<CallExpr>(Call.getOriginExpr());
- (this->*Callback)(C, CE);
+ Callback(this, C, CE);
// If the evaluate call resulted in no change, chain to the next eval call
// handler.
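For context, a sketch of what the wide-character plumbing above lets the analyzer model, assuming the cstring checkers (including the out-of-bounds one) are enabled; the exact diagnostic wording is not claimed here.

#include <wchar.h>

void sink(size_t);

void wide_copy(const wchar_t *src) {
  wchar_t buf[4];
  wmemcpy(buf, src, 8); // the element count is scaled by sizeof(wchar_t) in
                        // CheckLocation, so this write past buf[3] can now
                        // be flagged
  sink(wcslen(src));    // wcslen/wcsnlen are routed to evalstrLength /
                        // evalstrnLength like strlen/strnlen
}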
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index 895212d134b8..b673b51c4623 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -55,9 +55,9 @@ public:
ID.AddPointer(getTag());
}
- virtual PathDiagnosticPieceRef
- VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
+ PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) override;
// FIXME: Scan the map once in the visitor's constructor and do a direct
// lookup by region.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 36464707d06a..adedc9c30fad 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -852,9 +852,8 @@ protected:
return false;
}
- virtual bool
- wasModifiedInFunction(const ExplodedNode *CallEnterN,
- const ExplodedNode *CallExitEndN) override {
+ bool wasModifiedInFunction(const ExplodedNode *CallEnterN,
+ const ExplodedNode *CallExitEndN) override {
if (!doesFnIntendToHandleOwnership(
CallExitEndN->getFirstPred()->getLocationContext()->getDecl(),
CallExitEndN->getState()->getAnalysisManager().getASTContext()))
@@ -885,7 +884,7 @@ protected:
"later deallocation");
}
- virtual PathDiagnosticPieceRef
+ PathDiagnosticPieceRef
maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
const ObjCMethodCall &Call,
const ExplodedNode *N) override {
@@ -893,7 +892,7 @@ protected:
return nullptr;
}
- virtual PathDiagnosticPieceRef
+ PathDiagnosticPieceRef
maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
const CXXConstructorCall &Call,
const ExplodedNode *N) override {
@@ -901,7 +900,7 @@ protected:
return nullptr;
}
- virtual PathDiagnosticPieceRef
+ PathDiagnosticPieceRef
maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
const ExplodedNode *N) override {
// TODO: Factor the logic of "what constitutes as an entity being passed
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index cddf206728b1..27fd40a441fa 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -182,7 +182,7 @@ public:
return false;
};
- if (std::any_of(RD->field_begin(), RD->field_end(), IsTrickyField))
+ if (llvm::any_of(RD->fields(), IsTrickyField))
return true;
return false;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index ef673ae41a3d..5897e5096461 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -240,7 +240,7 @@ class StdLibraryFunctionsChecker
ArgNo OtherArgN;
public:
- virtual StringRef getName() const override { return "Comparison"; };
+ StringRef getName() const override { return "Comparison"; };
ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
ArgNo OtherArgN)
: ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 38e69e81d800..cd91fa9b090c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -57,19 +57,17 @@ class RegularField final : public FieldNode {
public:
RegularField(const FieldRegion *FR) : FieldNode(FR) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "uninitialized field ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
- Out << '.';
- }
+ void printSeparator(llvm::raw_ostream &Out) const override { Out << '.'; }
};
/// Represents that the FieldNode that comes after this is declared in a base
@@ -85,20 +83,20 @@ public:
assert(T->getAsCXXRecordDecl());
}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
llvm_unreachable("This node can never be the final node in the "
"fieldchain!");
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << BaseClassT->getAsCXXRecordDecl()->getName() << "::";
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {}
+ void printSeparator(llvm::raw_ostream &Out) const override {}
- virtual bool isBase() const override { return true; }
+ bool isBase() const override { return true; }
};
} // end of anonymous namespace
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index a6e81b3657a2..f5bd765ff679 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -34,20 +34,20 @@ public:
LocField(const FieldRegion *FR, const bool IsDereferenced = true)
: FieldNode(FR), IsDereferenced(IsDereferenced) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
if (IsDereferenced)
Out << "uninitialized pointee ";
else
Out << "uninitialized pointer ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ void printSeparator(llvm::raw_ostream &Out) const override {
if (getDecl()->getType()->isPointerType())
Out << "->";
else
@@ -64,11 +64,11 @@ public:
NeedsCastLocField(const FieldRegion *FR, const QualType &T)
: FieldNode(FR), CastBackType(T) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "uninitialized pointee ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {
+ void printPrefix(llvm::raw_ostream &Out) const override {
// If this object is a nonloc::LocAsInteger.
if (getDecl()->getType()->isIntegerType())
Out << "reinterpret_cast";
@@ -78,13 +78,11 @@ public:
Out << '<' << CastBackType.getAsString() << ">(";
}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl()) << ')';
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
- Out << "->";
- }
+ void printSeparator(llvm::raw_ostream &Out) const override { Out << "->"; }
};
/// Represents a Loc field that points to itself.
@@ -93,17 +91,17 @@ class CyclicLocField final : public FieldNode {
public:
CyclicLocField(const FieldRegion *FR) : FieldNode(FR) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "object references itself ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ void printSeparator(llvm::raw_ostream &Out) const override {
llvm_unreachable("CyclicLocField objects must be the last node of the "
"fieldchain!");
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 2caa5bbc16df..3a90c37a36da 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -530,9 +530,8 @@ public:
private:
/// \return Whether \c RegionOfInterest was modified at \p CurrN compared to
/// the value it holds in \p CallExitBeginN.
- virtual bool
- wasModifiedBeforeCallExit(const ExplodedNode *CurrN,
- const ExplodedNode *CallExitBeginN) override;
+ bool wasModifiedBeforeCallExit(const ExplodedNode *CurrN,
+ const ExplodedNode *CallExitBeginN) override;
/// Attempts to find the region of interest in a given record decl,
/// by either following the base classes or fields.
@@ -547,19 +546,17 @@ private:
// Region of interest corresponds to an IVar, exiting a method
// which could have written into that IVar, but did not.
- virtual PathDiagnosticPieceRef
- maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
- const ObjCMethodCall &Call,
- const ExplodedNode *N) override final;
+ PathDiagnosticPieceRef maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) final;
- virtual PathDiagnosticPieceRef
- maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
- const CXXConstructorCall &Call,
- const ExplodedNode *N) override final;
+ PathDiagnosticPieceRef maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) final;
- virtual PathDiagnosticPieceRef
+ PathDiagnosticPieceRef
maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
- const ExplodedNode *N) override final;
+ const ExplodedNode *N) final;
/// Consume the information on the no-store stack frame in order to
/// either emit a note or suppress the report entirely.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index d8f56f2f8cff..19149d079822 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -196,6 +196,14 @@ typedef llvm::ImmutableMap<
IndexOfElementToConstructMap;
REGISTER_TRAIT_WITH_PROGRAMSTATE(IndexOfElementToConstruct,
IndexOfElementToConstructMap)
+
+// This trait is responsible for holding our pending ArrayInitLoopExprs.
+// It pairs the LocationContext and the initializer CXXConstructExpr with
+// the size of the array that's being copy initialized.
+typedef llvm::ImmutableMap<
+ std::pair<const CXXConstructExpr *, const LocationContext *>, unsigned>
+ PendingInitLoopMap;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PendingInitLoop, PendingInitLoopMap)
//===----------------------------------------------------------------------===//
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
@@ -462,6 +470,34 @@ ProgramStateRef ExprEngine::setIndexOfElementToConstruct(
return State->set<IndexOfElementToConstruct>(Key, Idx);
}
+Optional<unsigned> ExprEngine::getPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+
+ return Optional<unsigned>::create(
+ State->get<PendingInitLoop>({E, LCtx->getStackFrame()}));
+}
+
+ProgramStateRef ExprEngine::removePendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(E && State->contains<PendingInitLoop>(Key));
+ return State->remove<PendingInitLoop>(Key);
+}
+
+ProgramStateRef ExprEngine::setPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx,
+ unsigned Size) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(!State->contains<PendingInitLoop>(Key) && Size > 0);
+
+ return State->set<PendingInitLoop>(Key, Size);
+}
+
Optional<unsigned>
ExprEngine::getIndexOfElementToConstruct(ProgramStateRef State,
const CXXConstructExpr *E,
@@ -487,17 +523,23 @@ ExprEngine::addObjectUnderConstruction(ProgramStateRef State,
const LocationContext *LC, SVal V) {
ConstructedObjectKey Key(Item, LC->getStackFrame());
- const CXXConstructExpr *E = nullptr;
+ const Expr *Init = nullptr;
if (auto DS = dyn_cast_or_null<DeclStmt>(Item.getStmtOrNull())) {
if (auto VD = dyn_cast_or_null<VarDecl>(DS->getSingleDecl()))
- E = dyn_cast<CXXConstructExpr>(VD->getInit());
+ Init = VD->getInit();
}
- if (!E && !Item.getStmtOrNull()) {
- auto CtorInit = Item.getCXXCtorInitializer();
- E = dyn_cast<CXXConstructExpr>(CtorInit->getInit());
- }
+ if (auto LE = dyn_cast_or_null<LambdaExpr>(Item.getStmtOrNull()))
+ Init = *(LE->capture_init_begin() + Item.getIndex());
+
+ if (!Init && !Item.getStmtOrNull())
+ Init = Item.getCXXCtorInitializer()->getInit();
+
+ // In an ArrayInitLoopExpr the real initializer is returned by
+ // getSubExpr().
+ if (const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init))
+ Init = AILE->getSubExpr();
// FIXME: Currently the state might already contain the marker due to
// incorrect handling of temporaries bound to default parameters.
@@ -508,7 +550,8 @@ ExprEngine::addObjectUnderConstruction(ProgramStateRef State,
assert((!State->get<ObjectsUnderConstruction>(Key) ||
Key.getItem().getKind() ==
ConstructionContextItem::TemporaryDestructorKind ||
- State->contains<IndexOfElementToConstruct>({E, LC})) &&
+ State->contains<IndexOfElementToConstruct>(
+ {dyn_cast_or_null<CXXConstructExpr>(Init), LC})) &&
"The object is already marked as `UnderConstruction`, when it's not "
"supposed to!");
return State->set<ObjectsUnderConstruction>(Key, V);
@@ -2744,7 +2787,10 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
SVal Base = state->getLValue(DD, LCtx);
if (DD->getType()->isReferenceType()) {
- Base = state->getSVal(Base.getAsRegion());
+ if (const MemRegion *R = Base.getAsRegion())
+ Base = state->getSVal(R);
+ else
+ Base = UnknownVal();
}
SVal V = UnknownVal();
@@ -2765,15 +2811,27 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
V = state->getLValue(BD->getType(), Idx, Base);
}
- // Handle binding to tuple-like strcutures
- else if (BD->getHoldingVar()) {
- // FIXME: handle tuples
- return;
+ // Handle binding to tuple-like structures
+ else if (const auto *HV = BD->getHoldingVar()) {
+ V = state->getLValue(HV, LCtx);
+
+ if (HV->getType()->isReferenceType()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
} else
llvm_unreachable("An unknown case of structured binding encountered!");
- if (BD->getType()->isReferenceType())
- V = state->getSVal(V.getAsRegion());
+ // In case of tuple-like types the references are already handled, so we
+ // don't want to handle them again.
+ if (BD->getType()->isReferenceType() && !BD->getHoldingVar()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
ProgramPoint::PostLValueKind);
@@ -2797,6 +2855,11 @@ void ExprEngine::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex,
const Expr *Arr = Ex->getCommonExpr()->getSourceExpr();
for (auto *Node : CheckerPreStmt) {
+
+ // The constructor visitor has already taken care of everything.
+ if (auto *CE = dyn_cast<CXXConstructExpr>(Ex->getSubExpr()))
+ break;
+
const LocationContext *LCtx = Node->getLocationContext();
ProgramStateRef state = Node->getState();
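
For illustration, the structured-binding handling added to VisitCommonDeclRefExpr above covers tuple-like decompositions, where each BindingDecl carries a holding variable; the following hypothetical snippet (not part of the patch) shows the kind of code that is now modeled instead of being skipped under the old "FIXME: handle tuples" path:

    // Hypothetical example, for illustration only.
    #include <functional>
    #include <tuple>

    int use_bindings(std::tuple<int, int &> t) {
      // Each binding reads through its holding variable (the std::get result);
      // reference-typed holding variables are additionally dereferenced.
      auto [a, ref] = t;
      return a + ref;
    }

    int call_it() {
      int n = 2;
      return use_bindings(std::make_tuple(1, std::ref(n)));
    }
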
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 08fac9fb2e69..04e00274b2a7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -290,6 +290,23 @@ SVal ExprEngine::computeObjectUnderConstruction(
return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
}
+ case ConstructionContext::LambdaCaptureKind: {
+ CallOpts.IsTemporaryCtorOrDtor = true;
+
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+
+ SVal Base = loc::MemRegionVal(
+ MRMgr.getCXXTempObjectRegion(LCC->getInitializer(), LCtx));
+
+ const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E);
+ if (getIndexOfElementToConstruct(State, CE, LCtx)) {
+ CallOpts.IsArrayCtorOrDtor = true;
+ Base = State->getLValue(E->getType(), svalBuilder.makeArrayIndex(Idx),
+ Base);
+ }
+
+ return Base;
+ }
case ConstructionContext::ArgumentKind: {
// Arguments are technically temporaries.
CallOpts.IsTemporaryCtorOrDtor = true;
@@ -450,6 +467,17 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
return State;
}
+ case ConstructionContext::LambdaCaptureKind: {
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+
+ // If we capture an array, we want to store the super region, not a
+ // sub-region.
+ if (const auto *EL = dyn_cast_or_null<ElementRegion>(V.getAsRegion()))
+ V = loc::MemRegionVal(EL->getSuperRegion());
+
+ return addObjectUnderConstruction(
+ State, {LCC->getLambdaExpr(), LCC->getIndex()}, LCtx, V);
+ }
case ConstructionContext::ArgumentKind: {
const auto *ACC = cast<ArgumentConstructionContext>(CC);
if (const auto *BTE = ACC->getCXXBindTemporaryExpr())
@@ -462,6 +490,59 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
llvm_unreachable("Unhandled construction context!");
}
+static ProgramStateRef
+bindRequiredArrayElementToEnvironment(ProgramStateRef State,
+ const ArrayInitLoopExpr *AILE,
+ const LocationContext *LCtx, SVal Idx) {
+ // The ctor in this case is guaranteed to be a copy ctor, otherwise we hit a
+ // compile time error.
+ //
+ // -ArrayInitLoopExpr <-- we're here
+ // |-OpaqueValueExpr
+ // | `-DeclRefExpr <-- match this
+ // `-CXXConstructExpr
+ // `-ImplicitCastExpr
+ // `-ArraySubscriptExpr
+ // |-ImplicitCastExpr
+ // | `-OpaqueValueExpr
+ // | `-DeclRefExpr
+ // `-ArrayInitIndexExpr
+ //
+ // The resulting expression might look like the one below in an implicit
+ // copy/move ctor.
+ //
+ // ArrayInitLoopExpr <-- we're here
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this
+ // | (`-CXXStaticCastExpr) <-- move ctor only
+ // | `-DeclRefExpr
+ // `-CXXConstructExpr
+ // `-ArraySubscriptExpr
+ // |-ImplicitCastExpr
+ // | `-OpaqueValueExpr
+ // | `-MemberExpr
+ // | `-DeclRefExpr
+ // `-ArrayInitIndexExpr
+ //
+ // HACK: There is no way we can put the index of the array element into the
+ // CFG unless we unroll the loop, so we manually select and bind the required
+ // parameter to the environment.
+ const auto *CE = cast<CXXConstructExpr>(AILE->getSubExpr());
+ const auto *OVESrc = AILE->getCommonExpr()->getSourceExpr();
+
+ SVal Base = UnknownVal();
+ if (const auto *ME = dyn_cast<MemberExpr>(OVESrc))
+ Base = State->getSVal(ME, LCtx);
+ else if (const auto *DRE = cast<DeclRefExpr>(OVESrc))
+ Base = State->getLValue(cast<VarDecl>(DRE->getDecl()), LCtx);
+ else
+ llvm_unreachable("ArrayInitLoopExpr contains unexpected source expression");
+
+ SVal NthElem = State->getLValue(CE->getType(), Idx, Base);
+
+ return State->BindExpr(CE->getArg(0), LCtx, NthElem);
+}
+
void ExprEngine::handleConstructor(const Expr *E,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
@@ -502,12 +583,26 @@ void ExprEngine::handleConstructor(const Expr *E,
// Inherited constructors are always base class constructors.
assert(CE && !CIE && "A complete constructor is inherited?!");
+ // If the ctor is part of an ArrayInitLoopExpr, we want to handle it
+ // differently.
+ auto *AILE = CC ? CC->getArrayInitLoop() : nullptr;
+
unsigned Idx = 0;
- if (CE->getType()->isArrayType()) {
+ if (CE->getType()->isArrayType() || AILE) {
Idx = getIndexOfElementToConstruct(State, CE, LCtx).value_or(0u);
State = setIndexOfElementToConstruct(State, CE, LCtx, Idx + 1);
}
+ if (AILE) {
+ // Only set this once even though we loop through it multiple times.
+ if (!getPendingInitLoop(State, CE, LCtx))
+ State = setPendingInitLoop(State, CE, LCtx,
+ AILE->getArraySize().getLimitedValue());
+
+ State = bindRequiredArrayElementToEnvironment(
+ State, AILE, LCtx, svalBuilder.makeArrayIndex(Idx));
+ }
+
// The target region is found from construction context.
std::tie(State, Target) =
handleConstructionContext(CE, State, LCtx, CC, CallOpts, Idx);
@@ -908,7 +1003,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// values are properly placed inside the required region, however if an
// initializer list is used, this doesn't happen automatically.
auto *Init = CNE->getInitializer();
- bool isInitList = dyn_cast_or_null<InitListExpr>(Init);
+ bool isInitList = isa_and_nonnull<InitListExpr>(Init);
QualType ObjTy =
isInitList ? Init->getType() : CNE->getType()->getPointeeType();
@@ -1038,19 +1133,40 @@ void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
// If we created a new MemRegion for the lambda, we should explicitly bind
// the captures.
+ unsigned Idx = 0;
CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
e = LE->capture_init_end();
- i != e; ++i, ++CurField) {
+ i != e; ++i, ++CurField, ++Idx) {
FieldDecl *FieldForCapture = *CurField;
SVal FieldLoc = State->getLValue(FieldForCapture, V);
SVal InitVal;
if (!FieldForCapture->hasCapturedVLAType()) {
Expr *InitExpr = *i;
+
+ if (const auto AILE = dyn_cast<ArrayInitLoopExpr>(InitExpr)) {
+ // If the AILE initializes a POD array, we need to keep it as the
+ // InitExpr.
+ if (dyn_cast<CXXConstructExpr>(AILE->getSubExpr()))
+ InitExpr = AILE->getSubExpr();
+ }
+
assert(InitExpr && "Capture missing initialization expression");
- InitVal = State->getSVal(InitExpr, LocCtxt);
+
+ if (dyn_cast<CXXConstructExpr>(InitExpr)) {
+ InitVal = *getObjectUnderConstruction(State, {LE, Idx}, LocCtxt);
+ InitVal = State->getSVal(InitVal.getAsRegion());
+
+ State = finishObjectConstruction(State, {LE, Idx}, LocCtxt);
+ } else
+ InitVal = State->getSVal(InitExpr, LocCtxt);
+
} else {
+
+ assert(!getObjectUnderConstruction(State, {LE, Idx}, LocCtxt) &&
+ "VLA capture by value is a compile time error!");
+
// The field stores the length of a captured variable-length array.
// These captures don't have initialization expressions; instead we
// get the length from the VLAType size expression.
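
As a rough sketch of what the ArrayInitLoopExpr handling above corresponds to in source code: capturing an array by value in a lambda, or copying an object with an array member through its implicit copy constructor, produces the two AST shapes described in bindRequiredArrayElementToEnvironment (a DeclRefExpr and a MemberExpr as the common expression, respectively). A hypothetical example, not taken from the patch:

    // Hypothetical example, for illustration only.
    struct S {
      S() = default;
      S(const S &) {}      // non-trivial copy, so elements are copy-constructed
    };

    struct Owner {
      S elems[2];          // implicit copy ctor: AILE over a MemberExpr
    };

    int capture_and_copy() {
      S local[2];
      auto lam = [local] { return 0; };  // by-value capture: AILE over a DeclRefExpr
      Owner a;
      Owner b = a;                       // element-wise copy via the implicit ctor
      return lam();
    }
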
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index ebcca92a3e4e..8fb2ce9cd18f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -265,9 +265,13 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
- if (!ShouldRepeatCall &&
- getIndexOfElementToConstruct(state, CCE, callerCtx))
- state = removeIndexOfElementToConstruct(state, CCE, callerCtx);
+ if (!ShouldRepeatCall) {
+ if (getIndexOfElementToConstruct(state, CCE, callerCtx))
+ state = removeIndexOfElementToConstruct(state, CCE, callerCtx);
+
+ if (getPendingInitLoop(state, CCE, callerCtx))
+ state = removePendingInitLoop(state, CCE, callerCtx);
+ }
}
if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
@@ -815,8 +819,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
// We still allow construction into ElementRegion targets when they don't
// represent array elements.
if (CallOpts.IsArrayCtorOrDtor) {
- if (!shouldInlineArrayConstruction(
- dyn_cast<ArrayType>(CtorExpr->getType())))
+ if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
return CIP_DisallowedOnce;
}
@@ -1082,10 +1085,14 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
return true;
}
-bool ExprEngine::shouldInlineArrayConstruction(const ArrayType *Type) {
- if (!Type)
+bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
+ const CXXConstructExpr *CE,
+ const LocationContext *LCtx) {
+ if (!CE)
return false;
+ auto Type = CE->getType();
+
// FIXME: Handle other arrays types.
if (const auto *CAT = dyn_cast<ConstantArrayType>(Type)) {
unsigned Size = getContext().getConstantArrayElementCount(CAT);
@@ -1093,6 +1100,10 @@ bool ExprEngine::shouldInlineArrayConstruction(const ArrayType *Type) {
return Size <= AMgr.options.maxBlockVisitOnPath;
}
+ // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
+ if (auto Size = getPendingInitLoop(State, CE, LCtx))
+ return *Size <= AMgr.options.maxBlockVisitOnPath;
+
return false;
}
@@ -1111,6 +1122,9 @@ bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
return Size > getIndexOfElementToConstruct(State, E, LCtx);
}
+ if (auto Size = getPendingInitLoop(State, E, LCtx))
+ return Size > getIndexOfElementToConstruct(State, E, LCtx);
+
return false;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 5e946483a93d..d8ece9f39a25 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1888,6 +1888,30 @@ SVal RegionStoreManager::getSValFromStringLiteral(const StringLiteral *SL,
return svalBuilder.makeIntVal(Code, ElemT);
}
+static Optional<SVal> getDerivedSymbolForBinding(
+ RegionBindingsConstRef B, const TypedValueRegion *BaseRegion,
+ const TypedValueRegion *SubReg, const ASTContext &Ctx, SValBuilder &SVB) {
+ assert(BaseRegion);
+ QualType BaseTy = BaseRegion->getValueType();
+ QualType Ty = SubReg->getValueType();
+ if (BaseTy->isScalarType() && Ty->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(BaseTy) >= Ctx.getTypeSizeInChars(Ty)) {
+ if (const Optional<SVal> &ParentValue = B.getDirectBinding(BaseRegion)) {
+ if (SymbolRef ParentValueAsSym = ParentValue->getAsSymbol())
+ return SVB.getDerivedRegionValueSymbolVal(ParentValueAsSym, SubReg);
+
+ if (ParentValue->isUndef())
+ return UndefinedVal();
+
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
+ return None;
+}
+
SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const ElementRegion* R) {
// Check if the region has a binding.
@@ -1932,27 +1956,10 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
if (!O.getRegion())
return UnknownVal();
- if (const TypedValueRegion *baseR =
- dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
- QualType baseT = baseR->getValueType();
- if (baseT->isScalarType()) {
- QualType elemT = R->getElementType();
- if (elemT->isScalarType()) {
- if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
- if (const Optional<SVal> &V = B.getDirectBinding(superR)) {
- if (SymbolRef parentSym = V->getAsSymbol())
- return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (V->isUnknownOrUndef())
- return *V;
- // Other cases: give up. We are indexing into a larger object
- // that has some value, but we don't know how to handle that yet.
- return UnknownVal();
- }
- }
- }
- }
- }
+ if (const TypedValueRegion *baseR = dyn_cast<TypedValueRegion>(O.getRegion()))
+ if (auto V = getDerivedSymbolForBinding(B, baseR, R, Ctx, svalBuilder))
+ return *V;
+
return getBindingForFieldOrElementCommon(B, R, R->getElementType());
}
@@ -1988,6 +1995,26 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
}
}
+ // Handle the case where we are accessing into a larger scalar object.
+ // For example, this handles:
+ // struct header {
+ // unsigned a : 1;
+ // unsigned b : 1;
+ // };
+ // struct parse_t {
+ // unsigned bits0 : 1;
+ // unsigned bits2 : 2; // <-- header
+ // unsigned bits4 : 4;
+ // };
+ // int parse(parse_t *p) {
+ // unsigned copy = p->bits2;
+ // header *bits = (header *)&copy;
+ // return bits->b; <-- here
+ // }
+ if (const auto *Base = dyn_cast<TypedValueRegion>(R->getBaseRegion()))
+ if (auto V = getDerivedSymbolForBinding(B, Base, R, Ctx, svalBuilder))
+ return *V;
+
return getBindingForFieldOrElementCommon(B, R, Ty);
}
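
For context, the element-region path that the shared getDerivedSymbolForBinding helper also serves corresponds to reads like the following hypothetical snippet, where a sub-object of a scalar is accessed and its value becomes a symbol derived from the enclosing variable's symbol:

    // Hypothetical example, for illustration only.
    int low_byte(int v) {
      char *p = (char *)&v;  // ElementRegion over the scalar region of 'v'
      return p[0];           // value is derived from the symbol bound to 'v'
    }
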
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 19eb65b39b0a..513e6376f5ae 100644
--- a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -873,27 +873,6 @@ RVVIntrinsic::RVVIntrinsic(
Name += "_m";
}
- // Init RISC-V extensions
- for (const auto &T : OutInTypes) {
- if (T->isFloatVector(16) || T->isFloat(16))
- RISCVPredefinedMacros |= RISCVPredefinedMacro::Zvfh;
- if (T->isFloatVector(32))
- RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp32;
- if (T->isFloatVector(64))
- RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp64;
- if (T->isVector(64))
- RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELen64;
- }
- for (auto Feature : RequiredFeatures) {
- if (Feature == "RV64")
- RISCVPredefinedMacros |= RISCVPredefinedMacro::RV64;
- // Note: Full multiply instruction (mulh, mulhu, mulhsu, smul) for EEW=64
- // require V.
- if (Feature == "FullMultiply" &&
- (RISCVPredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64))
- RISCVPredefinedMacros |= RISCVPredefinedMacro::V;
- }
-
// Init OutputType and InputTypes
OutputType = OutInTypes[0];
InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
@@ -930,6 +909,48 @@ std::string RVVIntrinsic::getSuffixStr(
return join(SuffixStrs, "_");
}
+llvm::SmallVector<PrototypeDescriptor>
+RVVIntrinsic::computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
+ bool IsMasked, bool HasMaskedOffOperand,
+ bool HasVL, unsigned NF) {
+ SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
+ Prototype.end());
+ if (IsMasked) {
+ // If HasMaskedOffOperand, insert result type as first input operand.
+ if (HasMaskedOffOperand) {
+ if (NF == 1) {
+ NewPrototype.insert(NewPrototype.begin() + 1, NewPrototype[0]);
+ } else {
+ // Convert
+ // (void, op0 address, op1 address, ...)
+ // to
+ // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+ PrototypeDescriptor MaskoffType = NewPrototype[1];
+ MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+ for (unsigned I = 0; I < NF; ++I)
+ NewPrototype.insert(NewPrototype.begin() + NF + 1, MaskoffType);
+ }
+ }
+ if (HasMaskedOffOperand && NF > 1) {
+ // Convert
+ // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+ // to
+ // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
+ // ...)
+ NewPrototype.insert(NewPrototype.begin() + NF + 1,
+ PrototypeDescriptor::Mask);
+ } else {
+ // If IsMasked, insert PrototypeDescriptor::Mask as the first input operand.
+ NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
+ }
+ }
+
+ // If HasVL, append PrototypeDescriptor::VL as the last operand
+ if (HasVL)
+ NewPrototype.push_back(PrototypeDescriptor::VL);
+ return NewPrototype;
+}
+
SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
SmallVector<PrototypeDescriptor> PrototypeDescriptors;
const StringRef Primaries("evwqom0ztul");
@@ -951,5 +972,30 @@ SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
return PrototypeDescriptors;
}
+raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
+ OS << "{";
+ OS << "\"" << Record.Name << "\",";
+ if (Record.OverloadedName == nullptr ||
+ StringRef(Record.OverloadedName).empty())
+ OS << "nullptr,";
+ else
+ OS << "\"" << Record.OverloadedName << "\",";
+ OS << Record.PrototypeIndex << ",";
+ OS << Record.SuffixIndex << ",";
+ OS << Record.OverloadedSuffixIndex << ",";
+ OS << (int)Record.PrototypeLength << ",";
+ OS << (int)Record.SuffixLength << ",";
+ OS << (int)Record.OverloadedSuffixSize << ",";
+ OS << (int)Record.RequiredExtensions << ",";
+ OS << (int)Record.TypeRangeMask << ",";
+ OS << (int)Record.Log2LMULMask << ",";
+ OS << (int)Record.NF << ",";
+ OS << (int)Record.HasMasked << ",";
+ OS << (int)Record.HasVL << ",";
+ OS << (int)Record.HasMaskedOffOperand << ",";
+ OS << "},\n";
+ return OS;
+}
+
} // end namespace RISCV
} // end namespace clang
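
To make the operand reordering in computeBuiltinTypes concrete, here is a standalone sketch (plain strings instead of PrototypeDescriptor, not the real clang::RISCV API) of the masked, NF == 1 case with a maskedoff operand and a VL operand:

    // Standalone sketch, for illustration only.
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      // (result, op0, op1) is the unmasked prototype.
      std::vector<std::string> proto = {"ret", "op0", "op1"};

      // Masked, NF == 1: re-insert the result type as the maskedoff operand,
      // then insert the mask as the first input operand.
      std::string maskedoff = proto[0];
      proto.insert(proto.begin() + 1, maskedoff);
      proto.insert(proto.begin() + 1, "mask");

      // HasVL: the VL operand goes last.
      proto.push_back("vl");

      for (const auto &p : proto)
        std::cout << p << ' ';
      std::cout << '\n';  // ret mask ret op0 op1 vl
    }

The duplicated "ret" stands for the maskedoff operand, which shares the result type.
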
diff --git a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
index db4cd77d8c53..fc5f705b7fd4 100644
--- a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
@@ -29,6 +30,60 @@ using namespace llvm;
using namespace clang::RISCV;
namespace {
+struct SemaRecord {
+ // Intrinsic name, e.g. vadd_vv
+ std::string Name;
+
+ // Overloaded intrinsic name; may be empty if it can be computed from Name,
+ // e.g. vadd
+ std::string OverloadedName;
+
+ // Supported types, as a mask of BasicType values.
+ unsigned TypeRangeMask;
+
+ // Supported LMUL.
+ unsigned Log2LMULMask;
+
+ // Required extensions for this intrinsic.
+ unsigned RequiredExtensions;
+
+ // Prototype for this intrinsic.
+ SmallVector<PrototypeDescriptor> Prototype;
+
+ // Suffix of intrinsic name.
+ SmallVector<PrototypeDescriptor> Suffix;
+
+ // Suffix of overloaded intrinsic name.
+ SmallVector<PrototypeDescriptor> OverloadedSuffix;
+
+ // Number of fields; larger than 1 if it's a segment load/store.
+ unsigned NF;
+
+ bool HasMasked :1;
+ bool HasVL :1;
+ bool HasMaskedOffOperand :1;
+};
+
+// Compressed function signature table.
+class SemaSignatureTable {
+private:
+ std::vector<PrototypeDescriptor> SignatureTable;
+
+ void insert(ArrayRef<PrototypeDescriptor> Signature);
+
+public:
+ static constexpr unsigned INVALID_INDEX = ~0U;
+
+ // Create compressed signature table from SemaRecords.
+ void init(ArrayRef<SemaRecord> SemaRecords);
+
+ // Query the Signature, return INVALID_INDEX if not found.
+ unsigned getIndex(ArrayRef<PrototypeDescriptor> Signature);
+
+ /// Print signature table in RVVHeader Record to \p OS
+ void print(raw_ostream &OS);
+};
+
class RVVEmitter {
private:
RecordKeeper &Records;
@@ -45,22 +100,22 @@ public:
/// Emit all the information needed to map builtin -> LLVM IR intrinsic.
void createCodeGen(raw_ostream &o);
+ /// Emit all the information needed by SemaRISCVVectorLookup.cpp.
+ /// We have a large number of intrinsic functions for RVV; creating a
+ /// customized lookup could speed up compilation time.
+ void createSema(raw_ostream &o);
+
private:
- /// Create all intrinsics and add them to \p Out
- void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+ /// Create all intrinsics and add them to \p Out and SemaRecords.
+ void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out,
+ std::vector<SemaRecord> *SemaRecords = nullptr);
+ /// Create all intrinsic records and SemaSignatureTable from SemaRecords.
+ void createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
+ SemaSignatureTable &SST,
+ ArrayRef<SemaRecord> SemaRecords);
+
/// Print HeaderCode in RVVHeader Record to \p Out
void printHeaderCode(raw_ostream &OS);
-
- /// Emit Acrh predecessor definitions and body, assume the element of Defs are
- /// sorted by extension.
- void emitArchMacroAndBody(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
- std::function<void(raw_ostream &, const RVVIntrinsic &)>);
-
- // Emit the architecture preprocessor definitions. Return true when emits
- // non-empty string.
- bool emitMacroRestrictionStr(RISCVPredefinedMacroT PredefinedMacros,
- raw_ostream &o);
};
} // namespace
@@ -151,33 +206,82 @@ void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) {
OS << " break;\n";
}
-void emitIntrinsicFuncDef(const RVVIntrinsic &RVVI, raw_ostream &OS) {
- OS << "__attribute__((__clang_builtin_alias__(";
- OS << "__builtin_rvv_" << RVVI.getBuiltinName() << ")))\n";
- OS << RVVI.getOutputType()->getTypeStr() << " " << RVVI.getName() << "(";
- // Emit function arguments
- const RVVTypes &InputTypes = RVVI.getInputTypes();
- if (!InputTypes.empty()) {
- ListSeparator LS;
- for (unsigned i = 0; i < InputTypes.size(); ++i)
- OS << LS << InputTypes[i]->getTypeStr();
- }
- OS << ");\n";
+//===----------------------------------------------------------------------===//
+// SemaSignatureTable implementation
+//===----------------------------------------------------------------------===//
+void SemaSignatureTable::init(ArrayRef<SemaRecord> SemaRecords) {
+ // Sort signature entries by length so that longer signatures are inserted
+ // first; this makes it more likely that table entries are reused, which
+ // reduces the table size by ~10%.
+ struct Compare {
+ bool operator()(const SmallVector<PrototypeDescriptor> &A,
+ const SmallVector<PrototypeDescriptor> &B) const {
+ if (A.size() != B.size())
+ return A.size() > B.size();
+
+ size_t Len = A.size();
+ for (size_t i = 0; i < Len; ++i) {
+ if (A[i] != B[i])
+ return A[i] < B[i];
+ }
+
+ return false;
+ }
+ };
+
+ std::set<SmallVector<PrototypeDescriptor>, Compare> Signatures;
+ auto InsertToSignatureSet =
+ [&](const SmallVector<PrototypeDescriptor> &Signature) {
+ if (Signature.empty())
+ return;
+
+ Signatures.insert(Signature);
+ };
+
+ assert(!SemaRecords.empty());
+
+ llvm::for_each(SemaRecords, [&](const SemaRecord &SR) {
+ InsertToSignatureSet(SR.Prototype);
+ InsertToSignatureSet(SR.Suffix);
+ InsertToSignatureSet(SR.OverloadedSuffix);
+ });
+
+ llvm::for_each(Signatures, [this](auto &Sig) { insert(Sig); });
+}
+
+void SemaSignatureTable::insert(ArrayRef<PrototypeDescriptor> Signature) {
+ if (getIndex(Signature) != INVALID_INDEX)
+ return;
+
+ // Insert Signature into SignatureTable if not found in the table.
+ SignatureTable.insert(SignatureTable.begin(), Signature.begin(),
+ Signature.end());
}
-void emitOverloadedFuncDef(const RVVIntrinsic &RVVI, raw_ostream &OS) {
- OS << "__attribute__((__clang_builtin_alias__(";
- OS << "__builtin_rvv_" << RVVI.getBuiltinName() << ")))\n";
- OS << RVVI.getOutputType()->getTypeStr() << " " << RVVI.getOverloadedName()
- << "(";
- // Emit function arguments
- const RVVTypes &InputTypes = RVVI.getInputTypes();
- if (!InputTypes.empty()) {
- ListSeparator LS;
- for (unsigned i = 0; i < InputTypes.size(); ++i)
- OS << LS << InputTypes[i]->getTypeStr();
+unsigned SemaSignatureTable::getIndex(ArrayRef<PrototypeDescriptor> Signature) {
+ // An empty signature could point to any index, since a length field
+ // accompanies every use; just always point it to 0.
+ if (Signature.empty())
+ return 0;
+
+ // Check whether Signature is already in the table.
+ if (Signature.size() < SignatureTable.size()) {
+ size_t Bound = SignatureTable.size() - Signature.size() + 1;
+ for (size_t Index = 0; Index < Bound; ++Index) {
+ if (equal(Signature.begin(), Signature.end(),
+ SignatureTable.begin() + Index))
+ return Index;
+ }
}
- OS << ");\n";
+
+ return INVALID_INDEX;
+}
+
+void SemaSignatureTable::print(raw_ostream &OS) {
+ for (const auto &Sig : SignatureTable)
+ OS << "PrototypeDescriptor(" << static_cast<int>(Sig.PT) << ", "
+ << static_cast<int>(Sig.VTM) << ", " << static_cast<int>(Sig.TM)
+ << "),\n";
}
//===----------------------------------------------------------------------===//
@@ -212,10 +316,9 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
OS << "extern \"C\" {\n";
OS << "#endif\n\n";
- printHeaderCode(OS);
+ OS << "#pragma clang riscv intrinsic vector\n\n";
- std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
- createRVVIntrinsics(Defs);
+ printHeaderCode(OS);
auto printType = [&](auto T) {
OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr()
@@ -255,7 +358,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
}
OS << "#endif\n";
- OS << "#if defined(__riscv_f)\n";
+ OS << "#if (__riscv_v_elen_fp >= 32)\n";
for (int Log2LMUL : Log2LMULs) {
auto T = RVVType::computeType(BasicType::Float32, Log2LMUL,
PrototypeDescriptor::Vector);
@@ -264,7 +367,7 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
}
OS << "#endif\n";
- OS << "#if defined(__riscv_d)\n";
+ OS << "#if (__riscv_v_elen_fp >= 64)\n";
for (int Log2LMUL : Log2LMULs) {
auto T = RVVType::computeType(BasicType::Float64, Log2LMUL,
PrototypeDescriptor::Vector);
@@ -273,37 +376,8 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
}
OS << "#endif\n\n";
- // The same extension include in the same arch guard marco.
- llvm::stable_sort(Defs, [](const std::unique_ptr<RVVIntrinsic> &A,
- const std::unique_ptr<RVVIntrinsic> &B) {
- return A->getRISCVPredefinedMacros() < B->getRISCVPredefinedMacros();
- });
-
- OS << "#define __rvv_ai static __inline__\n";
-
- // Print intrinsic functions with macro
- emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
- OS << "__rvv_ai ";
- emitIntrinsicFuncDef(Inst, OS);
- });
-
- OS << "#undef __rvv_ai\n\n";
-
OS << "#define __riscv_v_intrinsic_overloading 1\n";
- // Print Overloaded APIs
- OS << "#define __rvv_aio static __inline__ "
- "__attribute__((__overloadable__))\n";
-
- emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
- if (!Inst.isMasked() && !Inst.hasUnMaskedOverloaded())
- return;
- OS << "__rvv_aio ";
- emitOverloadedFuncDef(Inst, OS);
- });
-
- OS << "#undef __rvv_aio\n";
-
OS << "\n#ifdef __cplusplus\n";
OS << "}\n";
OS << "#endif // __cplusplus\n";
@@ -392,7 +466,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
}
void RVVEmitter::createRVVIntrinsics(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> &Out,
+ std::vector<SemaRecord> *SemaRecords) {
std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
for (auto *R : RV) {
StringRef Name = R->getValueAsString("Name");
@@ -404,12 +479,12 @@ void RVVEmitter::createRVVIntrinsics(
bool HasMasked = R->getValueAsBit("HasMasked");
bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
bool HasVL = R->getValueAsBit("HasVL");
- Record *MaskedPolicyRecord = R->getValueAsDef("MaskedPolicy");
- PolicyScheme MaskedPolicy =
- static_cast<PolicyScheme>(MaskedPolicyRecord->getValueAsInt("Value"));
- Record *UnMaskedPolicyRecord = R->getValueAsDef("UnMaskedPolicy");
- PolicyScheme UnMaskedPolicy =
- static_cast<PolicyScheme>(UnMaskedPolicyRecord->getValueAsInt("Value"));
+ Record *MPSRecord = R->getValueAsDef("MaskedPolicyScheme");
+ auto MaskedPolicyScheme =
+ static_cast<PolicyScheme>(MPSRecord->getValueAsInt("Value"));
+ Record *UMPSRecord = R->getValueAsDef("UnMaskedPolicyScheme");
+ auto UnMaskedPolicyScheme =
+ static_cast<PolicyScheme>(UMPSRecord->getValueAsInt("Value"));
bool HasUnMaskedOverloaded = R->getValueAsBit("HasUnMaskedOverloaded");
std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
bool HasBuiltinAlias = R->getValueAsBit("HasBuiltinAlias");
@@ -425,50 +500,19 @@ void RVVEmitter::createRVVIntrinsics(
// Parse prototype and create a list of primitive type with transformers
// (operand) in Prototype. Prototype[0] is output operand.
- SmallVector<PrototypeDescriptor> Prototype = parsePrototypes(Prototypes);
+ SmallVector<PrototypeDescriptor> BasicPrototype =
+ parsePrototypes(Prototypes);
SmallVector<PrototypeDescriptor> SuffixDesc = parsePrototypes(SuffixProto);
SmallVector<PrototypeDescriptor> OverloadedSuffixDesc =
parsePrototypes(OverloadedSuffixProto);
// Compute Builtin types
- SmallVector<PrototypeDescriptor> MaskedPrototype = Prototype;
- if (HasMasked) {
- // If HasMaskedOffOperand, insert result type as first input operand.
- if (HasMaskedOffOperand) {
- if (NF == 1) {
- MaskedPrototype.insert(MaskedPrototype.begin() + 1, Prototype[0]);
- } else {
- // Convert
- // (void, op0 address, op1 address, ...)
- // to
- // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
- PrototypeDescriptor MaskoffType = Prototype[1];
- MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
- for (unsigned I = 0; I < NF; ++I)
- MaskedPrototype.insert(MaskedPrototype.begin() + NF + 1,
- MaskoffType);
- }
- }
- if (HasMaskedOffOperand && NF > 1) {
- // Convert
- // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
- // to
- // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
- // ...)
- MaskedPrototype.insert(MaskedPrototype.begin() + NF + 1,
- PrototypeDescriptor::Mask);
- } else {
- // If HasMasked, insert PrototypeDescriptor:Mask as first input operand.
- MaskedPrototype.insert(MaskedPrototype.begin() + 1,
- PrototypeDescriptor::Mask);
- }
- }
- // If HasVL, append PrototypeDescriptor:VL to last operand
- if (HasVL) {
- Prototype.push_back(PrototypeDescriptor::VL);
- MaskedPrototype.push_back(PrototypeDescriptor::VL);
- }
+ auto Prototype = RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/false, /*HasMaskedOffOperand=*/false,
+ HasVL, NF);
+ auto MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF);
// Create Intrinsics for each type and LMUL.
for (char I : TypeRange) {
@@ -487,7 +531,7 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, OverloadedName, OverloadedSuffixStr, IRName,
/*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
- UnMaskedPolicy, HasUnMaskedOverloaded, HasBuiltinAlias,
+ UnMaskedPolicyScheme, HasUnMaskedOverloaded, HasBuiltinAlias,
ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF));
if (HasMasked) {
// Create a masked intrinsic
@@ -496,12 +540,57 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, OverloadedName, OverloadedSuffixStr,
MaskedIRName,
- /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicy,
+ /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
HasUnMaskedOverloaded, HasBuiltinAlias, MaskedManualCodegen,
*MaskTypes, IntrinsicTypes, RequiredFeatures, NF));
}
} // end for Log2LMULList
} // end for TypeRange
+
+ // We don't emit vsetvli and vsetvlimax for SemaRecord.
+ // They are written in riscv_vector.td and will emit their macro definitions
+ // in riscv_vector.h.
+ if (Name == "vsetvli" || Name == "vsetvlimax")
+ continue;
+
+ if (!SemaRecords)
+ continue;
+
+ // Create SemaRecord
+ SemaRecord SR;
+ SR.Name = Name.str();
+ SR.OverloadedName = OverloadedName.str();
+ BasicType TypeRangeMask = BasicType::Unknown;
+ for (char I : TypeRange)
+ TypeRangeMask |= ParseBasicType(I);
+
+ SR.TypeRangeMask = static_cast<unsigned>(TypeRangeMask);
+
+ unsigned Log2LMULMask = 0;
+ for (int Log2LMUL : Log2LMULList)
+ Log2LMULMask |= 1 << (Log2LMUL + 3);
+
+ SR.Log2LMULMask = Log2LMULMask;
+
+ SR.RequiredExtensions = 0;
+ for (auto RequiredFeature : RequiredFeatures) {
+ RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
+ .Case("RV64", RVV_REQ_RV64)
+ .Case("FullMultiply", RVV_REQ_FullMultiply)
+ .Default(RVV_REQ_None);
+ assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
+ SR.RequiredExtensions |= RequireExt;
+ }
+
+ SR.NF = NF;
+ SR.HasMasked = HasMasked;
+ SR.HasVL = HasVL;
+ SR.HasMaskedOffOperand = HasMaskedOffOperand;
+ SR.Prototype = std::move(BasicPrototype);
+ SR.Suffix = parsePrototypes(SuffixProto);
+ SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
+
+ SemaRecords->push_back(SR);
}
}
@@ -514,47 +603,59 @@ void RVVEmitter::printHeaderCode(raw_ostream &OS) {
}
}
-void RVVEmitter::emitArchMacroAndBody(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
- std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
- RISCVPredefinedMacroT PrevMacros =
- (*Defs.begin())->getRISCVPredefinedMacros();
- bool NeedEndif = emitMacroRestrictionStr(PrevMacros, OS);
- for (auto &Def : Defs) {
- RISCVPredefinedMacroT CurMacros = Def->getRISCVPredefinedMacros();
- if (CurMacros != PrevMacros) {
- if (NeedEndif)
- OS << "#endif\n\n";
- NeedEndif = emitMacroRestrictionStr(CurMacros, OS);
- PrevMacros = CurMacros;
- }
- if (Def->hasBuiltinAlias())
- PrintBody(OS, *Def);
+void RVVEmitter::createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
+ SemaSignatureTable &SST,
+ ArrayRef<SemaRecord> SemaRecords) {
+ SST.init(SemaRecords);
+
+ for (const auto &SR : SemaRecords) {
+ Out.emplace_back(RVVIntrinsicRecord());
+ RVVIntrinsicRecord &R = Out.back();
+ R.Name = SR.Name.c_str();
+ R.OverloadedName = SR.OverloadedName.c_str();
+ R.PrototypeIndex = SST.getIndex(SR.Prototype);
+ R.SuffixIndex = SST.getIndex(SR.Suffix);
+ R.OverloadedSuffixIndex = SST.getIndex(SR.OverloadedSuffix);
+ R.PrototypeLength = SR.Prototype.size();
+ R.SuffixLength = SR.Suffix.size();
+ R.OverloadedSuffixSize = SR.OverloadedSuffix.size();
+ R.RequiredExtensions = SR.RequiredExtensions;
+ R.TypeRangeMask = SR.TypeRangeMask;
+ R.Log2LMULMask = SR.Log2LMULMask;
+ R.NF = SR.NF;
+ R.HasMasked = SR.HasMasked;
+ R.HasVL = SR.HasVL;
+ R.HasMaskedOffOperand = SR.HasMaskedOffOperand;
+
+ assert(R.PrototypeIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
+ assert(R.SuffixIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
+ assert(R.OverloadedSuffixIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
}
- if (NeedEndif)
- OS << "#endif\n\n";
}
-bool RVVEmitter::emitMacroRestrictionStr(RISCVPredefinedMacroT PredefinedMacros,
- raw_ostream &OS) {
- if (PredefinedMacros == RISCVPredefinedMacro::Basic)
- return false;
- OS << "#if ";
- ListSeparator LS(" && ");
- if (PredefinedMacros & RISCVPredefinedMacro::V)
- OS << LS << "defined(__riscv_v)";
- if (PredefinedMacros & RISCVPredefinedMacro::Zvfh)
- OS << LS << "defined(__riscv_zvfh)";
- if (PredefinedMacros & RISCVPredefinedMacro::RV64)
- OS << LS << "(__riscv_xlen == 64)";
- if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64)
- OS << LS << "(__riscv_v_elen >= 64)";
- if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELenFp32)
- OS << LS << "(__riscv_v_elen_fp >= 32)";
- if (PredefinedMacros & RISCVPredefinedMacro::VectorMaxELenFp64)
- OS << LS << "(__riscv_v_elen_fp >= 64)";
- OS << "\n";
- return true;
+void RVVEmitter::createSema(raw_ostream &OS) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+ std::vector<RVVIntrinsicRecord> RVVIntrinsicRecords;
+ SemaSignatureTable SST;
+ std::vector<SemaRecord> SemaRecords;
+
+ createRVVIntrinsics(Defs, &SemaRecords);
+
+ createRVVIntrinsicRecords(RVVIntrinsicRecords, SST, SemaRecords);
+
+ // Emit signature table for SemaRISCVVectorLookup.cpp.
+ OS << "#ifdef DECL_SIGNATURE_TABLE\n";
+ SST.print(OS);
+ OS << "#endif\n";
+
+ // Emit RVVIntrinsicRecords for SemaRISCVVectorLookup.cpp.
+ OS << "#ifdef DECL_INTRINSIC_RECORDS\n";
+ for (const RVVIntrinsicRecord &Record : RVVIntrinsicRecords)
+ OS << Record;
+ OS << "#endif\n";
}
namespace clang {
@@ -570,4 +671,8 @@ void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
RVVEmitter(Records).createCodeGen(OS);
}
+void EmitRVVBuiltinSema(RecordKeeper &Records, raw_ostream &OS) {
+ RVVEmitter(Records).createSema(OS);
+}
+
} // End namespace clang
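
The compression performed by SemaSignatureTable relies on shorter signatures being found as contiguous runs inside longer ones that were inserted first. A simplified, self-contained sketch of the idea (ints in place of PrototypeDescriptor, appending instead of prepending):

    // Standalone sketch, for illustration only.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    static std::vector<int> Table;

    static unsigned getIndex(const std::vector<int> &Sig) {
      if (Sig.empty())
        return 0;                      // length is stored separately, so 0 is fine
      if (Sig.size() <= Table.size())
        for (size_t I = 0, E = Table.size() - Sig.size() + 1; I != E; ++I)
          if (std::equal(Sig.begin(), Sig.end(), Table.begin() + I))
            return I;
      return ~0U;                      // INVALID_INDEX
    }

    static void insert(const std::vector<int> &Sig) {
      if (getIndex(Sig) == ~0U)
        Table.insert(Table.end(), Sig.begin(), Sig.end());
    }

    int main() {
      insert({7, 3, 3, 5});            // a long prototype
      insert({3, 3});                  // a suffix: reuses part of the first entry
      std::cout << getIndex({3, 3}) << ' ' << Table.size() << '\n';  // prints: 1 4
    }
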
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
index bb9366e2b7fc..d18a31226e80 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
@@ -88,6 +88,7 @@ enum ActionType {
GenRISCVVectorHeader,
GenRISCVVectorBuiltins,
GenRISCVVectorBuiltinCG,
+ GenRISCVVectorBuiltinSema,
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
@@ -243,6 +244,8 @@ cl::opt<ActionType> Action(
"Generate riscv_vector_builtins.inc for clang"),
clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
"Generate riscv_vector_builtin_cg.inc for clang"),
+ clEnumValN(GenRISCVVectorBuiltinSema, "gen-riscv-vector-builtin-sema",
+ "Generate riscv_vector_builtin_sema.inc for clang"),
clEnumValN(GenAttrDocs, "gen-attr-docs",
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -458,6 +461,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenRISCVVectorBuiltinCG:
EmitRVVBuiltinCG(Records, OS);
break;
+ case GenRISCVVectorBuiltinSema:
+ EmitRVVBuiltinSema(Records, OS);
+ break;
case GenAttrDocs:
EmitClangAttrDocs(Records, OS);
break;
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
index fd8b9fcda20f..2ba857f66f50 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
@@ -110,6 +110,7 @@ void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
index 7a72de480676..9ee5a327b28a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
@@ -64,7 +64,7 @@ typedef union {
} udwords;
#if defined(__LP64__) || defined(__wasm__) || defined(__mips64) || \
- defined(__riscv) || defined(_WIN64)
+ defined(__riscv) || defined(_WIN64) || defined(__powerpc__)
#define CRT_HAS_128BIT
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 517f776baf6e..08c6062ba067 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -120,6 +120,11 @@ bool MprotectReadOnly(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
+#if SANITIZER_WINDOWS
+// Zero previously mmap'd memory. Currently used only on Windows.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
+#endif
+
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index b4506e52efaa..e0568c9b62d5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -234,6 +234,17 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return (void *)mapped_addr;
}
+// ZeroMmapFixedRegion zeroes out a region of memory previously returned from a
+// call to one of the MmapFixed* helpers. On non-Windows systems this would be
+// done with another mmap, but on Windows remapping is not an option.
+// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
+// memory, but we can't do this atomically, so instead we fall back to using
+// internal_memset.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
+ internal_memset((void*) fixed_addr, 0, size);
+ return true;
+}
+
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index c6a5fb8bc984..731d776cc893 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -23,10 +23,6 @@ TSAN_FLAG(bool, enable_annotations, true,
TSAN_FLAG(bool, suppress_equal_stacks, true,
"Suppress a race report if we've already output another race report "
"with the same stack.")
-TSAN_FLAG(bool, suppress_equal_addresses, true,
- "Suppress a race report if we've already output another race report "
- "on the same address.")
-
TSAN_FLAG(bool, report_bugs, true,
"Turns off bug reporting entirely (useful for benchmarking).")
TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 12643f7f512c..7c13c7335136 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -394,6 +394,7 @@ struct MappingGo48 {
0300 0000 0000 - 0700 0000 0000: -
0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
+PIE binaries are currently not supported, but it should be theoretically possible.
*/
struct MappingGoWindows {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 825a9d791ecc..ff3bb33eb134 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -197,13 +197,30 @@ static void DoResetImpl(uptr epoch) {
}
DPrintf("Resetting shadow...\n");
- if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
- "shadow")) {
+ auto shadow_begin = ShadowBeg();
+ auto shadow_end = ShadowEnd();
+#if SANITIZER_GO
+ CHECK_NE(0, ctx->mapped_shadow_begin);
+ shadow_begin = ctx->mapped_shadow_begin;
+ shadow_end = ctx->mapped_shadow_end;
+ VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+#endif
+
+#if SANITIZER_WINDOWS
+ auto resetFailed =
+ !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
+#else
+ auto resetFailed =
+ !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
+#endif
+ if (resetFailed) {
Printf("failed to reset shadow memory\n");
Die();
}
DPrintf("Resetting meta shadow...\n");
ctx->metamap.ResetClocks();
+ StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
ctx->resetting = false;
}
@@ -368,7 +385,6 @@ Context::Context()
}),
racy_mtx(MutexTypeRacy),
racy_stacks(),
- racy_addresses(),
fired_suppressions_mtx(MutexTypeFired),
slot_mtx(MutexTypeSlots),
resetting() {
@@ -557,18 +573,50 @@ void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
#endif
void MapShadow(uptr addr, uptr size) {
+ // Ensure the thread registry lock is held, so as to synchronize
+ // with DoReset, which also accesses the mapped_shadow_* ctx fields.
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ static bool data_mapped = false;
+
+#if !SANITIZER_GO
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
- "shadow"))
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
+#else
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
+ VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
+ addr, addr + size, shadow_begin, shadow_end);
+
+ if (!data_mapped) {
+ // First call maps data+bss.
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+ } else {
+ VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
+ ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
+ // Second and subsequent calls map heap.
+ if (shadow_end <= ctx->mapped_shadow_end)
+ return;
+ if (ctx->mapped_shadow_begin < shadow_begin)
+ ctx->mapped_shadow_begin = shadow_begin;
+ if (shadow_begin < ctx->mapped_shadow_end)
+ shadow_begin = ctx->mapped_shadow_end;
+ VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
+ Die();
+ ctx->mapped_shadow_end = shadow_end;
+ }
+#endif
// Meta shadow is 2:1, so tread carefully.
- static bool data_mapped = false;
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
@@ -585,8 +633,7 @@ void MapShadow(uptr addr, uptr size) {
// Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
- if (meta_end <= mapped_meta_end)
- return;
+ CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index c8d3c48a0c0c..e1e121e2ee07 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -314,9 +314,43 @@ struct Context {
ThreadRegistry thread_registry;
+ // This is used to prevent a very unlikely but very pathological behavior.
+ // Since memory access handling is not synchronized with DoReset,
+ // a thread running concurrently with DoReset can leave a bogus shadow value
+ // that will be later falsely detected as a race. For such false races
+ // RestoreStack will return false and we will not report it.
+ // However, consider that a thread leaves a whole lot of such bogus values
+ // and these values are later read by a whole lot of threads.
+ // This will cause massive amounts of ReportRace calls and lots of
+ // serialization. In very pathological cases the resulting slowdown
+ // can be >100x. This is very unlikely, but it was presumably observed
+ // in practice: https://github.com/google/sanitizers/issues/1552
+ // If this happens, the previous access sid+epoch will be the same for all of
+ // these false races, because once the thread tries to increment its epoch it
+ // will notice that DoReset has happened and will stop producing bogus shadow
+ // values. So, last_spurious_race is used to remember the last sid+epoch
+ // for which RestoreStack returned false. Then it is used to filter out
+ // races with the same sid+epoch very early and quickly.
+ // It is of course possible that multiple threads left multiple bogus shadow
+ // values and all of them are read by lots of threads at the same time.
+ // In such a case last_spurious_race will only be able to deduplicate a few
+ // races from one thread, then a few from another, and so on. An alternative
+ // would be to hold an array of such sid+epoch, but we consider such scenario
+ // as even less likely.
+ // Note: this can lead to some rare false negatives as well:
+ // 1. When a legit access with the same sid+epoch participates in a race
+ // as the "previous" memory access, it will be wrongly filtered out.
+ // 2. When RestoreStack returns false for a legit memory access because it
+ // was already evicted from the thread trace, we will still remember it in
+ // last_spurious_race. Then if there is another racing memory access from
+ // the same thread that happened in the same epoch, but was stored in the
+ // next thread trace part (which is still preserved in the thread trace),
+ // we will also wrongly filter it out while RestoreStack would actually
+ // succeed for that second memory access.
+ RawShadow last_spurious_race;
+
Mutex racy_mtx;
Vector<RacyStacks> racy_stacks;
- Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
Mutex fired_suppressions_mtx;
InternalMmapVector<FiredSuppression> fired_suppressions;
@@ -338,6 +372,10 @@ struct Context {
uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+ uptr mapped_shadow_begin;
+ uptr mapped_shadow_end;
+#endif
};
extern Context *ctx; // The one and the only global runtime context.
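The last_spurious_race comment in the Context hunk above boils down to one remembered sid+epoch value that is compared against the "old" shadow of every candidate report. A rough, self-contained sketch of that filter follows; it is not the upstream code, and SidEpoch, Pack, g_last_spurious, IsSpuriousRace and RememberSpuriousRace are hypothetical stand-ins for the runtime's RawShadow plumbing.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the sid+epoch pair carried in a shadow value.
struct SidEpoch {
  uint8_t sid;
  uint16_t epoch;
};

// Pack into one 32-bit word so it can be read and written with relaxed
// atomics, mirroring how the runtime stores a RawShadow.
static uint32_t Pack(SidEpoch s) {
  return (static_cast<uint32_t>(s.sid) << 16) | s.epoch;
}

static std::atomic<uint32_t> g_last_spurious{0};

// Early, cheap filter: suppress a report whose "old" access matches the
// last sid+epoch for which stack restoration failed.
bool IsSpuriousRace(SidEpoch old_access) {
  return g_last_spurious.load(std::memory_order_relaxed) == Pack(old_access);
}

// Called when RestoreStack fails for the "old" access: remember its
// sid+epoch so later races against the same bogus shadow are filtered out.
void RememberSpuriousRace(SidEpoch old_access) {
  g_last_spurious.store(Pack(old_access), std::memory_order_relaxed);
}

As the comment notes, a single remembered value can only deduplicate one thread's bogus values at a time; that is the trade-off accepted instead of keeping an array of sid+epoch pairs.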
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 7d771bfaad7f..8b20984a0100 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -145,15 +145,6 @@ void TraceTime(ThreadState* thr) {
TraceEvent(thr, ev);
}
-ALWAYS_INLINE RawShadow LoadShadow(RawShadow* p) {
- return static_cast<RawShadow>(
- atomic_load((atomic_uint32_t*)p, memory_order_relaxed));
-}
-
-ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
- atomic_store((atomic_uint32_t*)sp, static_cast<u32>(s), memory_order_relaxed);
-}
-
NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
Shadow old,
AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 4cf8816489df..444f210390cc 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -629,35 +629,6 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
return false;
}
-static bool FindRacyAddress(const RacyAddress &ra0) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
- return true;
- }
- }
- return false;
-}
-
-static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
- if (!flags()->suppress_equal_addresses)
- return false;
- RacyAddress ra0 = {addr_min, addr_max};
- {
- ReadLock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- }
- Lock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- ctx->racy_addresses.PushBack(ra0);
- return false;
-}
-
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
// These should have been checked in ShouldReport.
// It's too late to check them here, we have already taken locks.
@@ -730,6 +701,11 @@ static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
return false;
}
+static bool SpuriousRace(Shadow old) {
+ Shadow last(LoadShadow(&ctx->last_spurious_race));
+ return last.sid() == old.sid() && last.epoch() == old.epoch();
+}
+
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
AccessType typ0) {
CheckedMutex::CheckNoLocks();
@@ -750,6 +726,8 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
!(typ0 & kAccessFree) && !(typ1 & kAccessFree))
return;
+ if (SpuriousRace(old))
+ return;
const uptr kMop = 2;
Shadow s[kMop] = {cur, old};
@@ -761,8 +739,6 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
uptr addr_max = max(end0, end1);
if (IsExpectedReport(addr_min, addr_max - addr_min))
return;
- if (HandleRacyAddress(thr, addr_min, addr_max))
- return;
ReportType rep_typ = ReportTypeRace;
if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
@@ -791,9 +767,13 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
ThreadRegistryLock l0(&ctx->thread_registry);
Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
- size1, typ1, &tids[1], &traces[1], mset[1], &tags[1]))
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
return;
+ }
if (IsFiredSuppression(ctx, rep_typ, traces[1]))
return;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h
index b222acf9e6c5..6b8114ef5132 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h
@@ -178,6 +178,16 @@ class Shadow {
static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
+ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
+ return static_cast<RawShadow>(
+ atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
+}
+
+ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
+ atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
+ memory_order_relaxed);
+}
+
} // namespace __tsan
#endif
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy.h b/contrib/llvm-project/libcxx/include/__algorithm/copy.h
index 886a1ac6ce3e..5428baa68859 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy.h
@@ -58,20 +58,20 @@ pair<_InValueT*, _OutValueT*> __copy_impl(_InValueT* __first, _InValueT* __last,
template <class _InIter, class _OutIter,
__enable_if_t<is_same<typename remove_const<__iter_value_type<_InIter> >::type, __iter_value_type<_OutIter> >::value
- && __is_cpp17_contiguous_iterator<_InIter>::value
- && __is_cpp17_contiguous_iterator<_OutIter>::value
- && is_trivially_copy_assignable<__iter_value_type<_OutIter> >::value, int> = 0>
+ && __is_cpp17_contiguous_iterator<typename _InIter::iterator_type>::value
+ && __is_cpp17_contiguous_iterator<typename _OutIter::iterator_type>::value
+ && is_trivially_copy_assignable<__iter_value_type<_OutIter> >::value
+ && __is_reverse_iterator<_InIter>::value
+ && __is_reverse_iterator<_OutIter>::value, int> = 0>
inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-pair<reverse_iterator<_InIter>, reverse_iterator<_OutIter> >
-__copy_impl(reverse_iterator<_InIter> __first,
- reverse_iterator<_InIter> __last,
- reverse_iterator<_OutIter> __result) {
+pair<_InIter, _OutIter>
+__copy_impl(_InIter __first, _InIter __last, _OutIter __result) {
auto __first_base = std::__unwrap_iter(__first.base());
auto __last_base = std::__unwrap_iter(__last.base());
auto __result_base = std::__unwrap_iter(__result.base());
auto __result_first = __result_base - (__first_base - __last_base);
std::__copy_impl(__last_base, __first_base, __result_first);
- return std::make_pair(__last, reverse_iterator<_OutIter>(std::__rewrap_iter(__result.base(), __result_first)));
+ return std::make_pair(__last, _OutIter(std::__rewrap_iter(__result.base(), __result_first)));
}
template <class _InIter, class _Sent, class _OutIter,
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h b/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
index dd43a91ffa87..26b8c4d791fd 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/copy_backward.h
@@ -10,10 +10,16 @@
#define _LIBCPP___ALGORITHM_COPY_BACKWARD_H
#include <__algorithm/copy.h>
+#include <__algorithm/iterator_operations.h>
+#include <__algorithm/ranges_copy.h>
#include <__algorithm/unwrap_iter.h>
+#include <__concepts/same_as.h>
#include <__config>
#include <__iterator/iterator_traits.h>
#include <__iterator/reverse_iterator.h>
+#include <__ranges/subrange.h>
+#include <__utility/move.h>
+#include <__utility/pair.h>
#include <cstring>
#include <type_traits>
@@ -23,29 +29,31 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Iter1, class _Sent1, class _Iter2>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-pair<_Iter1, _Iter2> __copy_backward_impl(_Iter1 __first, _Sent1 __last, _Iter2 __result) {
- auto __ret = std::__copy(reverse_iterator<_Iter1>(__last),
- reverse_iterator<_Sent1>(__first),
- reverse_iterator<_Iter2>(__result));
- return pair<_Iter1, _Iter2>(__ret.first.base(), __ret.second.base());
+template <class _AlgPolicy, class _InputIterator, class _OutputIterator,
+ __enable_if_t<is_same<_AlgPolicy, _ClassicAlgPolicy>::value, int> = 0>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 pair<_InputIterator, _OutputIterator>
+__copy_backward(_InputIterator __first, _InputIterator __last, _OutputIterator __result) {
+ auto __ret = std::__copy(
+ __unconstrained_reverse_iterator<_InputIterator>(__last),
+ __unconstrained_reverse_iterator<_InputIterator>(__first),
+ __unconstrained_reverse_iterator<_OutputIterator>(__result));
+ return pair<_InputIterator, _OutputIterator>(__ret.first.base(), __ret.second.base());
}
-template <class _Iter1, class _Sent1, class _Iter2>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-pair<_Iter1, _Iter2> __copy_backward(_Iter1 __first, _Sent1 __last, _Iter2 __result) {
- auto __ret = std::__copy_backward_impl(std::__unwrap_iter(__first),
- std::__unwrap_iter(__last),
- std::__unwrap_iter(__result));
- return pair<_Iter1, _Iter2>(std::__rewrap_iter(__first, __ret.first), std::__rewrap_iter(__result, __ret.second));
+#if _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
+template <class _AlgPolicy, class _Iter1, class _Sent1, class _Iter2,
+ __enable_if_t<is_same<_AlgPolicy, _RangeAlgPolicy>::value, int> = 0>
+_LIBCPP_HIDE_FROM_ABI constexpr pair<_Iter1, _Iter2> __copy_backward(_Iter1 __first, _Sent1 __last, _Iter2 __result) {
+ auto __reverse_range = std::__reverse_range(std::ranges::subrange(std::move(__first), std::move(__last)));
+ auto __ret = ranges::copy(std::move(__reverse_range), std::make_reverse_iterator(__result));
+ return std::make_pair(__ret.in.base(), __ret.out.base());
}
+#endif // _LIBCPP_STD_VER > 17 && !defined(_LIBCPP_HAS_NO_INCOMPLETE_RANGES)
template <class _BidirectionalIterator1, class _BidirectionalIterator2>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
-_BidirectionalIterator2
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _BidirectionalIterator2
copy_backward(_BidirectionalIterator1 __first, _BidirectionalIterator1 __last, _BidirectionalIterator2 __result) {
- return std::__copy_backward(__first, __last, __result).second;
+ return std::__copy_backward<_ClassicAlgPolicy>(__first, __last, __result).second;
}
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h b/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
index 42d009ebbc0f..b11165baf384 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/equal_range.h
@@ -21,6 +21,7 @@
#include <__iterator/advance.h>
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
+#include <__iterator/next.h>
#include <__type_traits/is_callable.h>
#include <__type_traits/is_copy_constructible.h>
#include <__utility/move.h>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/includes.h b/contrib/llvm-project/libcxx/include/__algorithm/includes.h
index 102d3db39a2d..c64194a2c826 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/includes.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/includes.h
@@ -12,7 +12,10 @@
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
#include <__config>
+#include <__functional/identity.h>
+#include <__functional/invoke.h>
#include <__iterator/iterator_traits.h>
+#include <__type_traits/is_callable.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -21,13 +24,15 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Iter1, class _Sent1, class _Iter2, class _Sent2, class _Comp>
+template <class _Iter1, class _Sent1, class _Iter2, class _Sent2, class _Comp, class _Proj1, class _Proj2>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 bool
-__includes(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Comp&& __comp) {
+__includes(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2,
+ _Comp&& __comp, _Proj1&& __proj1, _Proj2&& __proj2) {
for (; __first2 != __last2; ++__first1) {
- if (__first1 == __last1 || __comp(*__first2, *__first1))
+ if (__first1 == __last1 || std::__invoke(
+ __comp, std::__invoke(__proj2, *__first2), std::__invoke(__proj1, *__first1)))
return false;
- if (!__comp(*__first1, *__first2))
+ if (!std::__invoke(__comp, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2)))
++__first2;
}
return true;
@@ -40,9 +45,13 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
_InputIterator2 __first2,
_InputIterator2 __last2,
_Compare __comp) {
+ static_assert(__is_callable<_Compare, decltype(*__first1), decltype(*__first2)>::value,
+ "Comparator has to be callable");
+
typedef typename __comp_ref_type<_Compare>::type _Comp_ref;
return std::__includes(
- std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), static_cast<_Comp_ref>(__comp));
+ std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2),
+ static_cast<_Comp_ref>(__comp), __identity(), __identity());
}
template <class _InputIterator1, class _InputIterator2>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h b/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
index f4364969b8f9..cb662e791872 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/inplace_merge.h
@@ -105,8 +105,8 @@ __buffered_inplace_merge(_BidirectionalIterator __first, _BidirectionalIterator
value_type* __p = __buff;
for (_BidirectionalIterator __i = __middle; __i != __last; __d.template __incr<value_type>(), (void) ++__i, (void) ++__p)
::new ((void*)__p) value_type(_IterOps<_AlgPolicy>::__iter_move(__i));
- typedef reverse_iterator<_BidirectionalIterator> _RBi;
- typedef reverse_iterator<value_type*> _Rv;
+ typedef __unconstrained_reverse_iterator<_BidirectionalIterator> _RBi;
+ typedef __unconstrained_reverse_iterator<value_type*> _Rv;
typedef __invert<_Compare> _Inverted;
std::__half_inplace_merge<_AlgPolicy, _Inverted>(_Rv(__p), _Rv(__buff),
_RBi(__middle), _RBi(__first),
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
index fe44e634f6dd..0d399a09b857 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/is_heap.h
@@ -28,7 +28,7 @@ bool
is_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp)
{
typedef typename __comp_ref_type<_Compare>::type _Comp_ref;
- return _VSTD::__is_heap_until<_Comp_ref>(__first, __last, __comp) == __last;
+ return std::__is_heap_until(__first, __last, static_cast<_Comp_ref>(__comp)) == __last;
}
template<class _RandomAccessIterator>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h b/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
index 39f313eb0d3f..adb35af887fb 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/is_heap_until.h
@@ -22,7 +22,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Compare, class _RandomAccessIterator>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _RandomAccessIterator
-__is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp)
+__is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare&& __comp)
{
typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
difference_type __len = __last - __first;
@@ -52,7 +52,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17
is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp)
{
typedef typename __comp_ref_type<_Compare>::type _Comp_ref;
- return _VSTD::__is_heap_until<_Comp_ref>(__first, __last, __comp);
+ return std::__is_heap_until(__first, __last, static_cast<_Comp_ref>(__comp));
}
template<class _RandomAccessIterator>
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_copy_backward.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_copy_backward.h
index 49c1b26add6d..673df8025fab 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_copy_backward.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_copy_backward.h
@@ -11,6 +11,7 @@
#include <__algorithm/copy_backward.h>
#include <__algorithm/in_out_result.h>
+#include <__algorithm/iterator_operations.h>
#include <__config>
#include <__iterator/concepts.h>
#include <__iterator/reverse_iterator.h>
@@ -39,7 +40,7 @@ struct __fn {
requires indirectly_copyable<_InIter1, _InIter2>
_LIBCPP_HIDE_FROM_ABI constexpr
copy_backward_result<_InIter1, _InIter2> operator()(_InIter1 __first, _Sent1 __last, _InIter2 __result) const {
- auto __ret = std::__copy_backward(std::move(__first), std::move(__last), std::move(__result));
+ auto __ret = std::__copy_backward<_RangeAlgPolicy>(std::move(__first), std::move(__last), std::move(__result));
return {std::move(__ret.first), std::move(__ret.second)};
}
@@ -47,9 +48,7 @@ struct __fn {
requires indirectly_copyable<iterator_t<_Range>, _Iter>
_LIBCPP_HIDE_FROM_ABI constexpr
copy_backward_result<borrowed_iterator_t<_Range>, _Iter> operator()(_Range&& __r, _Iter __result) const {
- auto __ret = std::__copy_backward(ranges::begin(__r),
- ranges::end(__r),
- std::move(__result));
+ auto __ret = std::__copy_backward<_RangeAlgPolicy>(ranges::begin(__r), ranges::end(__r), std::move(__result));
return {std::move(__ret.first), std::move(__ret.second)};
}
};
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate.h
index c23645e6d906..149296574d80 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate.h
@@ -9,21 +9,15 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_GENERATE_H
#define _LIBCPP___ALGORITHM_RANGES_GENERATE_H
-#include <__algorithm/generate.h>
-#include <__algorithm/make_projected.h>
#include <__concepts/constructible.h>
#include <__concepts/invocable.h>
#include <__config>
-#include <__functional/identity.h>
#include <__functional/invoke.h>
-#include <__functional/ranges_operations.h>
#include <__iterator/concepts.h>
#include <__iterator/iterator_traits.h>
-#include <__iterator/projected.h>
#include <__ranges/access.h>
#include <__ranges/concepts.h>
#include <__ranges/dangling.h>
-#include <__utility/forward.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -39,22 +33,28 @@ namespace __generate {
struct __fn {
+ template <class _OutIter, class _Sent, class _Func>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ static _OutIter __generate_fn_impl(_OutIter __first, _Sent __last, _Func& __gen) {
+ for (; __first != __last; ++__first) {
+ *__first = __gen();
+ }
+
+ return __first;
+ }
+
template <input_or_output_iterator _OutIter, sentinel_for<_OutIter> _Sent, copy_constructible _Func>
requires invocable<_Func&> && indirectly_writable<_OutIter, invoke_result_t<_Func&>>
_LIBCPP_HIDE_FROM_ABI constexpr
_OutIter operator()(_OutIter __first, _Sent __last, _Func __gen) const {
- // TODO: implement
- (void)__first; (void)__last; (void)__gen;
- return {};
+ return __generate_fn_impl(std::move(__first), std::move(__last), __gen);
}
template <class _Range, copy_constructible _Func>
requires invocable<_Func&> && output_range<_Range, invoke_result_t<_Func&>>
_LIBCPP_HIDE_FROM_ABI constexpr
borrowed_iterator_t<_Range> operator()(_Range&& __range, _Func __gen) const {
- // TODO: implement
- (void)__range; (void)__gen;
- return {};
+ return __generate_fn_impl(ranges::begin(__range), ranges::end(__range), __gen);
}
};
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate_n.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate_n.h
index 7bde5fb4e579..63f466cecdd7 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate_n.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_generate_n.h
@@ -9,21 +9,16 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_GENERATE_N_H
#define _LIBCPP___ALGORITHM_RANGES_GENERATE_N_H
-#include <__algorithm/generate_n.h>
-#include <__algorithm/make_projected.h>
#include <__concepts/constructible.h>
#include <__concepts/invocable.h>
#include <__config>
#include <__functional/identity.h>
#include <__functional/invoke.h>
-#include <__functional/ranges_operations.h>
#include <__iterator/concepts.h>
#include <__iterator/incrementable_traits.h>
#include <__iterator/iterator_traits.h>
-#include <__iterator/projected.h>
#include <__ranges/access.h>
#include <__ranges/concepts.h>
-#include <__utility/forward.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -43,9 +38,12 @@ struct __fn {
requires invocable<_Func&> && indirectly_writable<_OutIter, invoke_result_t<_Func&>>
_LIBCPP_HIDE_FROM_ABI constexpr
_OutIter operator()(_OutIter __first, iter_difference_t<_OutIter> __n, _Func __gen) const {
- // TODO: implement
- (void)__first; (void)__n; (void)__gen;
- return {};
+ for (; __n > 0; --__n) {
+ *__first = __gen();
+ ++__first;
+ }
+
+ return __first;
}
};
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_includes.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_includes.h
index ba054e6fd89d..2c7581af68af 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_includes.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_includes.h
@@ -56,7 +56,9 @@ struct __fn {
std::move(__last1),
std::move(__first2),
std::move(__last2),
- ranges::__make_projected_comp(__comp, __proj1, __proj2));
+ std::move(__comp),
+ std::move(__proj1),
+ std::move(__proj2));
}
template <
@@ -73,7 +75,9 @@ struct __fn {
ranges::end(__range1),
ranges::begin(__range2),
ranges::end(__range2),
- ranges::__make_projected_comp(__comp, __proj1, __proj2));
+ std::move(__comp),
+ std::move(__proj1),
+ std::move(__proj2));
}
};
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap.h
index 00105189fed7..a3e86d1a8d72 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap.h
@@ -9,18 +9,17 @@
#ifndef _LIBCPP___ALGORITHM_RANGES_IS_HEAP_H
#define _LIBCPP___ALGORITHM_RANGES_IS_HEAP_H
-#include <__algorithm/is_heap.h>
+#include <__algorithm/is_heap_until.h>
#include <__algorithm/make_projected.h>
#include <__config>
#include <__functional/identity.h>
-#include <__functional/invoke.h>
#include <__functional/ranges_operations.h>
#include <__iterator/concepts.h>
#include <__iterator/iterator_traits.h>
+#include <__iterator/next.h>
#include <__iterator/projected.h>
#include <__ranges/access.h>
#include <__ranges/concepts.h>
-#include <__utility/forward.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -36,22 +35,28 @@ namespace __is_heap {
struct __fn {
+ template <class _Iter, class _Sent, class _Proj, class _Comp>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ static bool __is_heap_fn_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) {
+ auto __last_iter = ranges::next(__first, __last);
+ auto&& __projected_comp = ranges::__make_projected_comp(__comp, __proj);
+
+ auto __result = std::__is_heap_until(std::move(__first), std::move(__last_iter), __projected_comp);
+ return __result == __last;
+ }
+
template <random_access_iterator _Iter, sentinel_for<_Iter> _Sent, class _Proj = identity,
indirect_strict_weak_order<projected<_Iter, _Proj>> _Comp = ranges::less>
_LIBCPP_HIDE_FROM_ABI constexpr
bool operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const {
- // TODO: implement
- (void)__first; (void)__last; (void)__comp; (void)__proj;
- return {};
+ return __is_heap_fn_impl(std::move(__first), std::move(__last), __comp, __proj);
}
template <random_access_range _Range, class _Proj = identity,
indirect_strict_weak_order<projected<iterator_t<_Range>, _Proj>> _Comp = ranges::less>
_LIBCPP_HIDE_FROM_ABI constexpr
bool operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const {
- // TODO: implement
- (void)__range; (void)__comp; (void)__proj;
- return {};
+ return __is_heap_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj);
}
};
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap_until.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap_until.h
index ad021d6f2525..bcd33ad404e8 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap_until.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_is_heap_until.h
@@ -13,15 +13,14 @@
#include <__algorithm/make_projected.h>
#include <__config>
#include <__functional/identity.h>
-#include <__functional/invoke.h>
#include <__functional/ranges_operations.h>
#include <__iterator/concepts.h>
#include <__iterator/iterator_traits.h>
+#include <__iterator/next.h>
#include <__iterator/projected.h>
#include <__ranges/access.h>
#include <__ranges/concepts.h>
#include <__ranges/dangling.h>
-#include <__utility/forward.h>
#include <__utility/move.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -37,22 +36,27 @@ namespace __is_heap_until {
struct __fn {
+ template <class _Iter, class _Sent, class _Proj, class _Comp>
+ _LIBCPP_HIDE_FROM_ABI constexpr
+ static _Iter __is_heap_until_fn_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) {
+ auto __last_iter = ranges::next(__first, __last);
+ auto&& __projected_comp = ranges::__make_projected_comp(__comp, __proj);
+
+ return std::__is_heap_until(std::move(__first), std::move(__last_iter), __projected_comp);
+ }
+
template <random_access_iterator _Iter, sentinel_for<_Iter> _Sent, class _Proj = identity,
indirect_strict_weak_order<projected<_Iter, _Proj>> _Comp = ranges::less>
_LIBCPP_HIDE_FROM_ABI constexpr
_Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const {
- // TODO: implement
- (void)__first; (void)__last; (void)__comp; (void)__proj;
- return {};
+ return __is_heap_until_fn_impl(std::move(__first), std::move(__last), __comp, __proj);
}
template <random_access_range _Range, class _Proj = identity,
indirect_strict_weak_order<projected<iterator_t<_Range>, _Proj>> _Comp = ranges::less>
_LIBCPP_HIDE_FROM_ABI constexpr
borrowed_iterator_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const {
- // TODO: implement
- (void)__range; (void)__comp; (void)__proj;
- return {};
+ return __is_heap_until_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj);
}
};
diff --git a/contrib/llvm-project/libcxx/include/__assert b/contrib/llvm-project/libcxx/include/__assert
index 84ddcd25b0b5..82db2cf052b5 100644
--- a/contrib/llvm-project/libcxx/include/__assert
+++ b/contrib/llvm-project/libcxx/include/__assert
@@ -45,7 +45,7 @@
# define _LIBCPP_ASSERT(expression, message) \
(__builtin_expect(static_cast<bool>(expression), 1) ? \
(void)0 : \
- ::std::__libcpp_assertion_handler(__FILE__, __LINE__, #expression, message))
+ ::std::__libcpp_assertion_handler("%s:%d: assertion %s failed: %s", __FILE__, __LINE__, #expression, message))
#elif !defined(_LIBCPP_ASSERTIONS_DISABLE_ASSUME) && __has_builtin(__builtin_assume)
# define _LIBCPP_ASSERT(expression, message) \
(_LIBCPP_DIAGNOSTIC_PUSH \
@@ -58,8 +58,8 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ASSERTION_HANDLER
-void __libcpp_assertion_handler(char const* __file, int __line, char const* __expression, char const* __message);
+_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ASSERTION_HANDLER _LIBCPP_ATTRIBUTE_FORMAT(__printf__, 1, 2)
+void __libcpp_assertion_handler(const char *__format, ...);
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/llvm-project/libcxx/include/__concepts/arithmetic.h b/contrib/llvm-project/libcxx/include/__concepts/arithmetic.h
index 023f031e7e07..d91570f02b8b 100644
--- a/contrib/llvm-project/libcxx/include/__concepts/arithmetic.h
+++ b/contrib/llvm-project/libcxx/include/__concepts/arithmetic.h
@@ -10,6 +10,8 @@
#define _LIBCPP___CONCEPTS_ARITHMETIC_H
#include <__config>
+#include <__type_traits/is_signed_integer.h>
+#include <__type_traits/is_unsigned_integer.h>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_integer.h b/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
index 0281b4f2fa67..3139c9efdf80 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_integer.h
@@ -19,6 +19,7 @@
#include <__format/formatter_integral.h>
#include <__format/formatter_output.h>
#include <__format/parser_std_format_spec.h>
+#include <__type_traits/make_32_64_or_128_bit.h>
#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
diff --git a/contrib/llvm-project/libcxx/include/__functional/invoke.h b/contrib/llvm-project/libcxx/include/__functional/invoke.h
index 7381462ffca5..d56a0accdbce 100644
--- a/contrib/llvm-project/libcxx/include/__functional/invoke.h
+++ b/contrib/llvm-project/libcxx/include/__functional/invoke.h
@@ -24,6 +24,7 @@
#include <__type_traits/is_reference_wrapper.h>
#include <__type_traits/is_same.h>
#include <__type_traits/is_void.h>
+#include <__type_traits/nat.h>
#include <__type_traits/remove_cv.h>
#include <__utility/declval.h>
#include <__utility/forward.h>
@@ -41,16 +42,6 @@ struct __any
__any(...);
};
-struct __nat
-{
-#ifndef _LIBCPP_CXX03_LANG
- __nat() = delete;
- __nat(const __nat&) = delete;
- __nat& operator=(const __nat&) = delete;
- ~__nat() = delete;
-#endif
-};
-
template <class _MP, bool _IsMemberFunctionPtr, bool _IsMemberObjectPtr>
struct __member_pointer_traits_imp
{
diff --git a/contrib/llvm-project/libcxx/include/__hash_table b/contrib/llvm-project/libcxx/include/__hash_table
index 6123a310ad63..959ef7fe7d83 100644
--- a/contrib/llvm-project/libcxx/include/__hash_table
+++ b/contrib/llvm-project/libcxx/include/__hash_table
@@ -18,6 +18,7 @@
#include <__debug>
#include <__functional/hash.h>
#include <__iterator/iterator_traits.h>
+#include <__memory/swap_allocator.h>
#include <__utility/swap.h>
#include <cmath>
#include <initializer_list>
diff --git a/contrib/llvm-project/libcxx/include/__iterator/incrementable_traits.h b/contrib/llvm-project/libcxx/include/__iterator/incrementable_traits.h
index ef5f5110a30e..4109b58d46a9 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/incrementable_traits.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/incrementable_traits.h
@@ -11,6 +11,7 @@
#define _LIBCPP___ITERATOR_INCREMENTABLE_TRAITS_H
#include <__config>
+#include <__type_traits/is_primary_template.h>
#include <concepts>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__iterator/iterator_traits.h b/contrib/llvm-project/libcxx/include/__iterator/iterator_traits.h
index 63525e230add..254f8c2339e4 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/iterator_traits.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/iterator_traits.h
@@ -501,6 +501,12 @@ using __iter_to_alloc_type = pair<
typename add_const<typename iterator_traits<_InputIterator>::value_type::first_type>::type,
typename iterator_traits<_InputIterator>::value_type::second_type>;
+template <class _Iter>
+using __iterator_category_type = typename iterator_traits<_Iter>::iterator_category;
+
+template <class _Iter>
+using __iterator_pointer_type = typename iterator_traits<_Iter>::pointer;
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___ITERATOR_ITERATOR_TRAITS_H
diff --git a/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h b/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
index 7f4ef3c3d503..5c344c2ee310 100644
--- a/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
+++ b/contrib/llvm-project/libcxx/include/__iterator/reverse_iterator.h
@@ -197,6 +197,12 @@ public:
#endif // _LIBCPP_STD_VER > 17
};
+template <class _Iter>
+struct __is_reverse_iterator : false_type {};
+
+template <class _Iter>
+struct __is_reverse_iterator<reverse_iterator<_Iter> > : true_type {};
+
template <class _Iter1, class _Iter2>
inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14
bool
@@ -327,20 +333,162 @@ reverse_iterator<_Iter> make_reverse_iterator(_Iter __i)
}
#endif
+#if _LIBCPP_STD_VER <= 17
template <class _Iter>
-using _ReverseWrapper = reverse_iterator<reverse_iterator<_Iter> >;
+using __unconstrained_reverse_iterator = reverse_iterator<_Iter>;
+#else
-template <class _Iter, bool __b>
-struct __unwrap_iter_impl<_ReverseWrapper<_Iter>, __b> {
+// __unconstrained_reverse_iterator allows us to use reverse iterators in the implementation of algorithms by working
+// around a language issue in C++20.
+// In C++20, when a reverse iterator wraps certain C++20-hostile iterators, calling comparison operators on it will
+// result in a compilation error. However, calling comparison operators on the pristine hostile iterator is not
+// an error. Thus, we cannot use reverse_iterators in the implementation of an algorithm that accepts a
+// C++20-hostile iterator. This class is an internal workaround -- it is a copy of reverse_iterator with
+// tweaks to make it support hostile iterators.
+//
+// A C++20-hostile iterator is one that defines a comparison operator where one of the arguments is an exact match
+// and the other requires an implicit conversion, for example:
+// friend bool operator==(const BaseIter&, const DerivedIter&);
+//
+// C++20 rules for rewriting equality operators create another overload of this function with parameters reversed:
+// friend bool operator==(const DerivedIter&, const BaseIter&);
+//
+// This creates an ambiguity in overload resolution.
+//
+// Clang treats this ambiguity differently in different contexts. When operator== is actually called in the function
+// body, the code is accepted with a warning. When a concept requires operator== to be a valid expression, however,
+// it evaluates to false. Thus, the implementation of reverse_iterator::operator== can actually call operator== on its
+// base iterators, but the constraints on reverse_iterator::operator== prevent it from being considered during overload
+// resolution. This class simply removes the problematic constraints from comparison functions.
+template <class _Iter>
+class __unconstrained_reverse_iterator {
+ _Iter __iter_;
+
+public:
+ static_assert(__is_cpp17_bidirectional_iterator<_Iter>::value);
+
+ using iterator_type = _Iter;
+ using iterator_category =
+ _If<__is_cpp17_random_access_iterator<_Iter>::value, random_access_iterator_tag, __iterator_category_type<_Iter>>;
+ using pointer = __iterator_pointer_type<_Iter>;
+ using value_type = iter_value_t<_Iter>;
+ using difference_type = iter_difference_t<_Iter>;
+ using reference = iter_reference_t<_Iter>;
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator() = default;
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator(const __unconstrained_reverse_iterator&) = default;
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __unconstrained_reverse_iterator(_Iter __iter) : __iter_(__iter) {}
+
+ _LIBCPP_HIDE_FROM_ABI constexpr _Iter base() const { return __iter_; }
+ _LIBCPP_HIDE_FROM_ABI constexpr reference operator*() const {
+ auto __tmp = __iter_;
+ return *--__tmp;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr pointer operator->() const {
+ if constexpr (is_pointer_v<_Iter>) {
+ return std::prev(__iter_);
+ } else {
+ return std::prev(__iter_).operator->();
+ }
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator++() {
+ --__iter_;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator++(int) {
+ auto __tmp = *this;
+ --__iter_;
+ return __tmp;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator--() {
+ ++__iter_;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator--(int) {
+ auto __tmp = *this;
+ ++__iter_;
+ return __tmp;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator+=(difference_type __n) {
+ __iter_ -= __n;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator-=(difference_type __n) {
+ __iter_ += __n;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator+(difference_type __n) const {
+ return __unconstrained_reverse_iterator(__iter_ - __n);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator-(difference_type __n) const {
+ return __unconstrained_reverse_iterator(__iter_ + __n);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr difference_type operator-(const __unconstrained_reverse_iterator& __other) const {
+ return __other.__iter_ - __iter_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI constexpr auto operator[](difference_type __n) const { return *(*this + __n); }
+
+ // Deliberately unconstrained unlike the comparison functions in `reverse_iterator` -- see the class comment for the
+ // rationale.
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator==(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() == __rhs.base();
+ }
+
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator!=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() != __rhs.base();
+ }
+
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator<(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() > __rhs.base();
+ }
+
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator>(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() < __rhs.base();
+ }
+
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator<=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() >= __rhs.base();
+ }
+
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+ operator>=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) {
+ return __lhs.base() <= __rhs.base();
+ }
+};
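The rationale comment above only shows the two operator== signatures that make an iterator C++20-hostile. Below is a minimal stand-alone sketch of that pattern; BaseIter and DerivedIter are illustrative names rather than libc++ types, and the sketch assumes a C++20-mode compiler, so treat it as an illustration, not part of the patch.

// Requires -std=c++20.
#include <concepts>

struct BaseIter {};
struct DerivedIter : BaseIter {};

// One operand is an exact match, the other needs a derived-to-base
// conversion. C++20 also synthesizes the reversed candidate
// operator==(const DerivedIter&, const BaseIter&), so comparing two
// DerivedIter values has two equally good candidates.
inline bool operator==(const BaseIter&, const DerivedIter&) { return true; }

// In a requires-expression the ambiguous `d == d` is simply not a valid
// expression, so the concept evaluates to false instead of hard-erroring
// (while a plain call is accepted by Clang with a warning, per the
// rationale above).
static_assert(!std::equality_comparable<DerivedIter>);

Because reverse_iterator's comparisons are constrained on exactly such expressions, wrapping a hostile iterator in reverse_iterator fails to compile, while __unconstrained_reverse_iterator keeps working by leaving its comparisons unconstrained.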
+
+template <class _Iter>
+struct __is_reverse_iterator<__unconstrained_reverse_iterator<_Iter>> : true_type {};
+
+#endif // _LIBCPP_STD_VER <= 17
+
+template <template <class> class _RevIter1, template <class> class _RevIter2, class _Iter>
+struct __unwrap_reverse_iter_impl {
using _UnwrappedIter = decltype(__unwrap_iter_impl<_Iter>::__unwrap(std::declval<_Iter>()));
+ using _ReverseWrapper = _RevIter1<_RevIter2<_Iter> >;
- static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _ReverseWrapper<_Iter>
- __rewrap(_ReverseWrapper<_Iter> __orig_iter, _UnwrappedIter __unwrapped_iter) {
- return _ReverseWrapper<_Iter>(
- reverse_iterator<_Iter>(__unwrap_iter_impl<_Iter>::__rewrap(__orig_iter.base().base(), __unwrapped_iter)));
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _ReverseWrapper
+ __rewrap(_ReverseWrapper __orig_iter, _UnwrappedIter __unwrapped_iter) {
+ return _ReverseWrapper(
+ _RevIter2<_Iter>(__unwrap_iter_impl<_Iter>::__rewrap(__orig_iter.base().base(), __unwrapped_iter)));
}
- static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _UnwrappedIter __unwrap(_ReverseWrapper<_Iter> __i) _NOEXCEPT {
+ static _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _UnwrappedIter __unwrap(_ReverseWrapper __i) _NOEXCEPT {
return __unwrap_iter_impl<_Iter>::__unwrap(__i.base().base());
}
};
@@ -355,6 +503,26 @@ _LIBCPP_HIDE_FROM_ABI constexpr ranges::
}
#endif
+template <class _Iter, bool __b>
+struct __unwrap_iter_impl<reverse_iterator<reverse_iterator<_Iter> >, __b>
+ : __unwrap_reverse_iter_impl<reverse_iterator, reverse_iterator, _Iter> {};
+
+#if _LIBCPP_STD_VER > 17
+
+template <class _Iter, bool __b>
+struct __unwrap_iter_impl<reverse_iterator<__unconstrained_reverse_iterator<_Iter>>, __b>
+ : __unwrap_reverse_iter_impl<reverse_iterator, __unconstrained_reverse_iterator, _Iter> {};
+
+template <class _Iter, bool __b>
+struct __unwrap_iter_impl<__unconstrained_reverse_iterator<reverse_iterator<_Iter>>, __b>
+ : __unwrap_reverse_iter_impl<__unconstrained_reverse_iterator, reverse_iterator, _Iter> {};
+
+template <class _Iter, bool __b>
+struct __unwrap_iter_impl<__unconstrained_reverse_iterator<__unconstrained_reverse_iterator<_Iter>>, __b>
+ : __unwrap_reverse_iter_impl<__unconstrained_reverse_iterator, __unconstrained_reverse_iterator, _Iter> {};
+
+#endif // _LIBCPP_STD_VER > 17
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___ITERATOR_REVERSE_ITERATOR_H
diff --git a/contrib/llvm-project/libcxx/include/__memory/swap_allocator.h b/contrib/llvm-project/libcxx/include/__memory/swap_allocator.h
new file mode 100644
index 000000000000..64970fa9e2f4
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__memory/swap_allocator.h
@@ -0,0 +1,53 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___MEMORY_SWAP_ALLOCATOR_H
+#define _LIBCPP___MEMORY_SWAP_ALLOCATOR_H
+
+#include <__config>
+#include <__memory/allocator_traits.h>
+#include <__type_traits/integral_constant.h>
+#include <__utility/swap.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <typename _Alloc>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 void __swap_allocator(_Alloc& __a1, _Alloc& __a2, true_type)
+#if _LIBCPP_STD_VER > 11
+ _NOEXCEPT
+#else
+ _NOEXCEPT_(__is_nothrow_swappable<_Alloc>::value)
+#endif
+{
+ using _VSTD::swap;
+ swap(__a1, __a2);
+}
+
+template <typename _Alloc>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 void
+__swap_allocator(_Alloc&, _Alloc&, false_type) _NOEXCEPT {}
+
+template <typename _Alloc>
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11 void __swap_allocator(_Alloc& __a1, _Alloc& __a2)
+#if _LIBCPP_STD_VER > 11
+ _NOEXCEPT
+#else
+ _NOEXCEPT_(__is_nothrow_swappable<_Alloc>::value)
+#endif
+{
+ _VSTD::__swap_allocator(
+ __a1, __a2, integral_constant<bool, allocator_traits<_Alloc>::propagate_on_container_swap::value>());
+}
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___MEMORY_SWAP_ALLOCATOR_H
diff --git a/contrib/llvm-project/libcxx/include/__memory/temporary_buffer.h b/contrib/llvm-project/libcxx/include/__memory/temporary_buffer.h
index 9822bd30c826..b1f7a126299f 100644
--- a/contrib/llvm-project/libcxx/include/__memory/temporary_buffer.h
+++ b/contrib/llvm-project/libcxx/include/__memory/temporary_buffer.h
@@ -11,6 +11,7 @@
#define _LIBCPP___MEMORY_TEMPORARY_BUFFER_H
#include <__config>
+#include <__type_traits/alignment_of.h>
#include <__utility/pair.h>
#include <cstddef>
#include <new>
diff --git a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
index 3a8560f080c6..72b6890c2225 100644
--- a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
+++ b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
@@ -10,12 +10,17 @@
#ifndef _LIBCPP___MEMORY_UNINITIALIZED_ALGORITHMS_H
#define _LIBCPP___MEMORY_UNINITIALIZED_ALGORITHMS_H
+#include <__algorithm/copy.h>
+#include <__algorithm/move.h>
#include <__config>
#include <__iterator/iterator_traits.h>
+#include <__iterator/reverse_iterator.h>
#include <__memory/addressof.h>
#include <__memory/allocator_traits.h>
#include <__memory/construct_at.h>
+#include <__memory/pointer_traits.h>
#include <__memory/voidify.h>
+#include <__type_traits/is_constant_evaluated.h>
#include <__utility/move.h>
#include <__utility/pair.h>
#include <__utility/transaction.h>
@@ -347,6 +352,7 @@ uninitialized_move_n(_InputIterator __ifirst, _Size __n, _ForwardIterator __ofir
__unreachable_sentinel(), __iter_move);
}
+// TODO: Rewrite this to iterate left to right and use reverse_iterators when calling
// Destroys every element in the range [first, last) FROM RIGHT TO LEFT using allocator
// destruction. If elements are themselves C-style arrays, they are recursively destroyed
// in the same manner.
@@ -492,6 +498,144 @@ constexpr void __uninitialized_allocator_value_construct_n(_Alloc& __alloc, _Bid
#endif // _LIBCPP_STD_VER > 14
+// Destroy all elements in [__first, __last) from left to right using allocator destruction.
+template <class _Alloc, class _Iter, class _Sent>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 void
+__allocator_destroy(_Alloc& __alloc, _Iter __first, _Sent __last) {
+ for (; __first != __last; ++__first)
+ allocator_traits<_Alloc>::destroy(__alloc, std::__to_address(__first));
+}
+
+template <class _Alloc, class _Iter>
+class _AllocatorDestroyRangeReverse {
+public:
+ _LIBCPP_HIDE_FROM_ABI _AllocatorDestroyRangeReverse(_Alloc& __alloc, _Iter& __first, _Iter& __last)
+ : __alloc_(__alloc), __first_(__first), __last_(__last) {}
+
+ _LIBCPP_CONSTEXPR_AFTER_CXX11 void operator()() const {
+ std::__allocator_destroy(__alloc_, std::reverse_iterator<_Iter>(__last_), std::reverse_iterator<_Iter>(__first_));
+ }
+
+private:
+ _Alloc& __alloc_;
+ _Iter& __first_;
+ _Iter& __last_;
+};
+
+// Copy-construct [__first1, __last1) in [__first2, __first2 + N), where N is distance(__first1, __last1).
+//
+// The caller has to ensure that __first2 can hold at least N uninitialized elements. If an exception is thrown the
+// already copied elements are destroyed in reverse order of their construction.
+template <class _Alloc, class _Iter1, class _Sent1, class _Iter2>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _Iter2
+__uninitialized_allocator_copy(_Alloc& __alloc, _Iter1 __first1, _Sent1 __last1, _Iter2 __first2) {
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ auto __destruct_first = __first2;
+ try {
+#endif
+ while (__first1 != __last1) {
+ allocator_traits<_Alloc>::construct(__alloc, std::__to_address(__first2), *__first1);
+ ++__first1;
+ ++__first2;
+ }
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ } catch (...) {
+ _AllocatorDestroyRangeReverse<_Alloc, _Iter2>(__alloc, __destruct_first, __first2)();
+ throw;
+ }
+#endif
+ return __first2;
+}
+
+template <class _Alloc, class _Type>
+struct __allocator_has_trivial_copy_construct : _Not<__has_construct<_Alloc, _Type*, const _Type&> > {};
+
+template <class _Type>
+struct __allocator_has_trivial_copy_construct<allocator<_Type>, _Type> : true_type {};
+
+template <class _Alloc,
+ class _Type,
+ class _RawType = typename remove_const<_Type>::type,
+ __enable_if_t<
+ // using _RawType because of the allocator<T const> extension
+ is_trivially_copy_constructible<_RawType>::value && is_trivially_copy_assignable<_RawType>::value &&
+ __allocator_has_trivial_copy_construct<_Alloc, _RawType>::value>* = nullptr>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _Type*
+__uninitialized_allocator_copy(_Alloc&, const _Type* __first1, const _Type* __last1, _Type* __first2) {
+ // TODO: Remove the const_cast once we drop support for std::allocator<T const>
+ if (__libcpp_is_constant_evaluated()) {
+ while (__first1 != __last1) {
+ std::__construct_at(std::__to_address(__first2), *__first1);
+ ++__first1;
+ ++__first2;
+ }
+ return __first2;
+ } else {
+ return std::copy(__first1, __last1, const_cast<_RawType*>(__first2));
+ }
+}
+
+// Move-construct the elements [__first1, __last1) into [__first2, __first2 + N)
+// if the move constructor is noexcept, where N is distance(__first1, __last1).
+//
+// Otherwise try to copy all elements. If an exception is thrown the already copied
+// elements are destroyed in reverse order of their construction.
+template <class _Alloc, class _Iter1, class _Sent1, class _Iter2>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _Iter2 __uninitialized_allocator_move_if_noexcept(
+ _Alloc& __alloc, _Iter1 __first1, _Sent1 __last1, _Iter2 __first2) {
+ static_assert(__is_cpp17_move_insertable<_Alloc>::value,
+ "The specified type does not meet the requirements of Cpp17MoveInsertable");
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ auto __destruct_first = __first2;
+ try {
+#endif
+ while (__first1 != __last1) {
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ allocator_traits<_Alloc>::construct(__alloc, std::__to_address(__first2), std::move_if_noexcept(*__first1));
+#else
+ allocator_traits<_Alloc>::construct(__alloc, std::__to_address(__first2), std::move(*__first1));
+#endif
+ ++__first1;
+ ++__first2;
+ }
+#ifndef _LIBCPP_NO_EXCEPTIONS
+ } catch (...) {
+ _AllocatorDestroyRangeReverse<_Alloc, _Iter2>(__alloc, __destruct_first, __first2)();
+ throw;
+ }
+#endif
+ return __first2;
+}
+
+template <class _Alloc, class _Type>
+struct __allocator_has_trivial_move_construct : _Not<__has_construct<_Alloc, _Type*, _Type&&> > {};
+
+template <class _Type>
+struct __allocator_has_trivial_move_construct<allocator<_Type>, _Type> : true_type {};
+
+#ifndef _LIBCPP_COMPILER_GCC
+template <
+ class _Alloc,
+ class _Iter1,
+ class _Iter2,
+ class _Type = typename iterator_traits<_Iter1>::value_type,
+ class = __enable_if_t<is_trivially_move_constructible<_Type>::value && is_trivially_move_assignable<_Type>::value &&
+ __allocator_has_trivial_move_construct<_Alloc, _Type>::value> >
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX17 _Iter1
+__uninitialized_allocator_move_if_noexcept(_Alloc&, _Iter1 __first1, _Iter1 __last1, _Iter2 __first2) {
+ if (__libcpp_is_constant_evaluated()) {
+ while (__first1 != __last1) {
+ std::__construct_at(std::__to_address(__first2), std::move(*__first1));
+ ++__first1;
+ ++__first2;
+ }
+ return __first2;
+ } else {
+ return std::move(__first1, __last1, __first2);
+ }
+}
+#endif // _LIBCPP_COMPILER_GCC
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___MEMORY_UNINITIALIZED_ALGORITHMS_H
diff --git a/contrib/llvm-project/libcxx/include/__split_buffer b/contrib/llvm-project/libcxx/include/__split_buffer
index 7409b51b1f96..f78167466217 100644
--- a/contrib/llvm-project/libcxx/include/__split_buffer
+++ b/contrib/llvm-project/libcxx/include/__split_buffer
@@ -19,6 +19,7 @@
#include <__iterator/move_iterator.h>
#include <__memory/allocator.h>
#include <__memory/compressed_pair.h>
+#include <__memory/swap_allocator.h>
#include <__utility/forward.h>
#include <memory>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/__tree b/contrib/llvm-project/libcxx/include/__tree
index 8d8449706871..59525a03047a 100644
--- a/contrib/llvm-project/libcxx/include/__tree
+++ b/contrib/llvm-project/libcxx/include/__tree
@@ -17,6 +17,7 @@
#include <__iterator/distance.h>
#include <__iterator/iterator_traits.h>
#include <__iterator/next.h>
+#include <__memory/swap_allocator.h>
#include <__utility/forward.h>
#include <__utility/swap.h>
#include <limits>
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/aligned_storage.h b/contrib/llvm-project/libcxx/include/__type_traits/aligned_storage.h
new file mode 100644
index 000000000000..9659a90ae50c
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/aligned_storage.h
@@ -0,0 +1,142 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_ALIGNED_STORAGE_H
+#define _LIBCPP___TYPE_TRAITS_ALIGNED_STORAGE_H
+
+#include <__config>
+#include <__type_traits/conditional.h>
+#include <__type_traits/integral_constant.h>
+#include <__type_traits/nat.h>
+#include <__type_traits/type_list.h>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp>
+struct __align_type
+{
+ static const size_t value = _LIBCPP_PREFERRED_ALIGNOF(_Tp);
+ typedef _Tp type;
+};
+
+struct __struct_double {long double __lx;};
+struct __struct_double4 {double __lx[4];};
+
+typedef
+ __type_list<__align_type<unsigned char>,
+ __type_list<__align_type<unsigned short>,
+ __type_list<__align_type<unsigned int>,
+ __type_list<__align_type<unsigned long>,
+ __type_list<__align_type<unsigned long long>,
+ __type_list<__align_type<double>,
+ __type_list<__align_type<long double>,
+ __type_list<__align_type<__struct_double>,
+ __type_list<__align_type<__struct_double4>,
+ __type_list<__align_type<int*>,
+ __nat
+ > > > > > > > > > > __all_types;
+
+template <size_t _Align>
+struct _ALIGNAS(_Align) __fallback_overaligned {};
+
+template <class _TL, size_t _Align> struct __find_pod;
+
+template <class _Hp, size_t _Align>
+struct __find_pod<__type_list<_Hp, __nat>, _Align>
+{
+ typedef typename conditional<
+ _Align == _Hp::value,
+ typename _Hp::type,
+ __fallback_overaligned<_Align>
+ >::type type;
+};
+
+template <class _Hp, class _Tp, size_t _Align>
+struct __find_pod<__type_list<_Hp, _Tp>, _Align>
+{
+ typedef typename conditional<
+ _Align == _Hp::value,
+ typename _Hp::type,
+ typename __find_pod<_Tp, _Align>::type
+ >::type type;
+};
+
+template <class _TL, size_t _Len> struct __find_max_align;
+
+template <class _Hp, size_t _Len>
+struct __find_max_align<__type_list<_Hp, __nat>, _Len> : public integral_constant<size_t, _Hp::value> {};
+
+template <size_t _Len, size_t _A1, size_t _A2>
+struct __select_align
+{
+private:
+ static const size_t __min = _A2 < _A1 ? _A2 : _A1;
+ static const size_t __max = _A1 < _A2 ? _A2 : _A1;
+public:
+ static const size_t value = _Len < __max ? __min : __max;
+};
+
+template <class _Hp, class _Tp, size_t _Len>
+struct __find_max_align<__type_list<_Hp, _Tp>, _Len>
+ : public integral_constant<size_t, __select_align<_Len, _Hp::value, __find_max_align<_Tp, _Len>::value>::value> {};
+
+template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
+struct _LIBCPP_TEMPLATE_VIS aligned_storage
+{
+ typedef typename __find_pod<__all_types, _Align>::type _Aligner;
+ union type
+ {
+ _Aligner __align;
+ unsigned char __data[(_Len + _Align - 1)/_Align * _Align];
+ };
+};
+
+#if _LIBCPP_STD_VER > 11
+template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
+ using aligned_storage_t = typename aligned_storage<_Len, _Align>::type;
+#endif
+
+#define _CREATE_ALIGNED_STORAGE_SPECIALIZATION(n) \
+template <size_t _Len>\
+struct _LIBCPP_TEMPLATE_VIS aligned_storage<_Len, n>\
+{\
+ struct _ALIGNAS(n) type\
+ {\
+ unsigned char __lx[(_Len + n - 1)/n * n];\
+ };\
+}
+
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x8);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x10);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x20);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x40);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x80);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x100);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x200);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x400);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x800);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1000);
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2000);
+// PE/COFF does not support alignment beyond 8192 (=0x2000)
+#if !defined(_LIBCPP_OBJECT_FORMAT_COFF)
+_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4000);
+#endif // !defined(_LIBCPP_OBJECT_FORMAT_COFF)
+
+#undef _CREATE_ALIGNED_STORAGE_SPECIALIZATION
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_ALIGNED_STORAGE_H
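The header above only splits libc++'s existing aligned_storage machinery out of <type_traits>. For readers unfamiliar with the trait, a minimal usage sketch follows; the Widget type and make_widget function are made up for the example.

#include <new>
#include <type_traits>

struct Widget { int a; double b; };

// Raw storage with the size and alignment of Widget; no constructor has run yet.
static std::aligned_storage<sizeof(Widget), alignof(Widget)>::type widget_buf;

Widget* make_widget() {
    // Construct the object in place; the caller is responsible for destroying it later.
    return ::new (static_cast<void*>(&widget_buf)) Widget{1, 2.0};
}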
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/aligned_union.h b/contrib/llvm-project/libcxx/include/__type_traits/aligned_union.h
new file mode 100644
index 000000000000..31eb9353a9e2
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/aligned_union.h
@@ -0,0 +1,55 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_ALIGNED_UNION_H
+#define _LIBCPP___TYPE_TRAITS_ALIGNED_UNION_H
+
+#include <__config>
+#include <__type_traits/aligned_storage.h>
+#include <__type_traits/integral_constant.h>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <size_t _I0, size_t ..._In>
+struct __static_max;
+
+template <size_t _I0>
+struct __static_max<_I0>
+{
+ static const size_t value = _I0;
+};
+
+template <size_t _I0, size_t _I1, size_t ..._In>
+struct __static_max<_I0, _I1, _In...>
+{
+ static const size_t value = _I0 >= _I1 ? __static_max<_I0, _In...>::value :
+ __static_max<_I1, _In...>::value;
+};
+
+template <size_t _Len, class _Type0, class ..._Types>
+struct aligned_union
+{
+ static const size_t alignment_value = __static_max<_LIBCPP_PREFERRED_ALIGNOF(_Type0),
+ _LIBCPP_PREFERRED_ALIGNOF(_Types)...>::value;
+ static const size_t __len = __static_max<_Len, sizeof(_Type0),
+ sizeof(_Types)...>::value;
+ typedef typename aligned_storage<__len, alignment_value>::type type;
+};
+
+#if _LIBCPP_STD_VER > 11
+template <size_t _Len, class ..._Types> using aligned_union_t = typename aligned_union<_Len, _Types...>::type;
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_ALIGNED_UNION_H
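aligned_union is the multi-type counterpart: its nested type is large and aligned enough to hold any one of the listed alternatives, which is how a hand-rolled variant would reserve its storage. A small sketch:

#include <type_traits>

using storage_t = std::aligned_union<0, char, double, long long>::type;

static_assert(sizeof(storage_t) >= sizeof(double), "big enough for each alternative");
static_assert(alignof(storage_t) >= alignof(double), "aligned for each alternative");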
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/common_reference.h b/contrib/llvm-project/libcxx/include/__type_traits/common_reference.h
new file mode 100644
index 000000000000..559d39858ec8
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/common_reference.h
@@ -0,0 +1,188 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_COMMON_REFERENCE_H
+#define _LIBCPP___TYPE_TRAITS_COMMON_REFERENCE_H
+
+#include <__config>
+#include <__type_traits/common_type.h>
+#include <__type_traits/copy_cv.h>
+#include <__type_traits/copy_cvref.h>
+#include <__type_traits/is_convertible.h>
+#include <__type_traits/is_reference.h>
+#include <__type_traits/remove_cv.h>
+#include <__type_traits/remove_cvref.h>
+#include <__type_traits/remove_reference.h>
+#include <__utility/declval.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// common_reference
+#if _LIBCPP_STD_VER > 17
+// Let COND_RES(X, Y) be:
+template <class _Xp, class _Yp>
+using __cond_res =
+ decltype(false ? declval<_Xp(&)()>()() : declval<_Yp(&)()>()());
+
+// Let `XREF(A)` denote a unary alias template `T` such that `T<U>` denotes the same type as `U`
+// with the addition of `A`'s cv and reference qualifiers, for a non-reference cv-unqualified type
+// `U`.
+// [Note: `XREF(A)` is `__xref<A>::template __apply`]
+template <class _Tp>
+struct __xref {
+ template<class _Up>
+ using __apply = __copy_cvref_t<_Tp, _Up>;
+};
+
+// Given types A and B, let X be remove_reference_t<A>, let Y be remove_reference_t<B>,
+// and let COMMON-REF(A, B) be:
+template<class _Ap, class _Bp, class _Xp = remove_reference_t<_Ap>, class _Yp = remove_reference_t<_Bp>>
+struct __common_ref;
+
+template<class _Xp, class _Yp>
+using __common_ref_t = typename __common_ref<_Xp, _Yp>::__type;
+
+template<class _Xp, class _Yp>
+using __cv_cond_res = __cond_res<__copy_cv_t<_Xp, _Yp>&, __copy_cv_t<_Yp, _Xp>&>;
+
+
+// If A and B are both lvalue reference types, COMMON-REF(A, B) is
+// COND-RES(COPYCV(X, Y)&, COPYCV(Y, X)&) if that type exists and is a reference type.
+template<class _Ap, class _Bp, class _Xp, class _Yp>
+requires requires { typename __cv_cond_res<_Xp, _Yp>; } && is_reference_v<__cv_cond_res<_Xp, _Yp>>
+struct __common_ref<_Ap&, _Bp&, _Xp, _Yp>
+{
+ using __type = __cv_cond_res<_Xp, _Yp>;
+};
+
+// Otherwise, let C be remove_reference_t<COMMON-REF(X&, Y&)>&&. ...
+template <class _Xp, class _Yp>
+using __common_ref_C = remove_reference_t<__common_ref_t<_Xp&, _Yp&>>&&;
+
+
+// .... If A and B are both rvalue reference types, C is well-formed, and
+// is_convertible_v<A, C> && is_convertible_v<B, C> is true, then COMMON-REF(A, B) is C.
+template<class _Ap, class _Bp, class _Xp, class _Yp>
+requires
+ requires { typename __common_ref_C<_Xp, _Yp>; } &&
+ is_convertible_v<_Ap&&, __common_ref_C<_Xp, _Yp>> &&
+ is_convertible_v<_Bp&&, __common_ref_C<_Xp, _Yp>>
+struct __common_ref<_Ap&&, _Bp&&, _Xp, _Yp>
+{
+ using __type = __common_ref_C<_Xp, _Yp>;
+};
+
+// Otherwise, let D be COMMON-REF(const X&, Y&). ...
+template <class _Tp, class _Up>
+using __common_ref_D = __common_ref_t<const _Tp&, _Up&>;
+
+// ... If A is an rvalue reference and B is an lvalue reference and D is well-formed and
+// is_convertible_v<A, D> is true, then COMMON-REF(A, B) is D.
+template<class _Ap, class _Bp, class _Xp, class _Yp>
+requires requires { typename __common_ref_D<_Xp, _Yp>; } &&
+ is_convertible_v<_Ap&&, __common_ref_D<_Xp, _Yp>>
+struct __common_ref<_Ap&&, _Bp&, _Xp, _Yp>
+{
+ using __type = __common_ref_D<_Xp, _Yp>;
+};
+
+// Otherwise, if A is an lvalue reference and B is an rvalue reference, then
+// COMMON-REF(A, B) is COMMON-REF(B, A).
+template<class _Ap, class _Bp, class _Xp, class _Yp>
+struct __common_ref<_Ap&, _Bp&&, _Xp, _Yp> : __common_ref<_Bp&&, _Ap&> {};
+
+// Otherwise, COMMON-REF(A, B) is ill-formed.
+template<class _Ap, class _Bp, class _Xp, class _Yp>
+struct __common_ref {};
+
+// Note C: For the common_reference trait applied to a parameter pack [...]
+
+template <class...>
+struct common_reference;
+
+template <class... _Types>
+using common_reference_t = typename common_reference<_Types...>::type;
+
+// bullet 1 - sizeof...(T) == 0
+template<>
+struct common_reference<> {};
+
+// bullet 2 - sizeof...(T) == 1
+template <class _Tp>
+struct common_reference<_Tp>
+{
+ using type = _Tp;
+};
+
+// bullet 3 - sizeof...(T) == 2
+template <class _Tp, class _Up> struct __common_reference_sub_bullet3;
+template <class _Tp, class _Up> struct __common_reference_sub_bullet2 : __common_reference_sub_bullet3<_Tp, _Up> {};
+template <class _Tp, class _Up> struct __common_reference_sub_bullet1 : __common_reference_sub_bullet2<_Tp, _Up> {};
+
+// sub-bullet 1 - If T1 and T2 are reference types and COMMON-REF(T1, T2) is well-formed, then
+// the member typedef `type` denotes that type.
+template <class _Tp, class _Up> struct common_reference<_Tp, _Up> : __common_reference_sub_bullet1<_Tp, _Up> {};
+
+template <class _Tp, class _Up>
+requires is_reference_v<_Tp> && is_reference_v<_Up> && requires { typename __common_ref_t<_Tp, _Up>; }
+struct __common_reference_sub_bullet1<_Tp, _Up>
+{
+ using type = __common_ref_t<_Tp, _Up>;
+};
+
+// sub-bullet 2 - Otherwise, if basic_common_reference<remove_cvref_t<T1>, remove_cvref_t<T2>, XREF(T1), XREF(T2)>::type
+// is well-formed, then the member typedef `type` denotes that type.
+template <class, class, template <class> class, template <class> class> struct basic_common_reference {};
+
+template <class _Tp, class _Up>
+using __basic_common_reference_t = typename basic_common_reference<
+ remove_cvref_t<_Tp>, remove_cvref_t<_Up>,
+ __xref<_Tp>::template __apply, __xref<_Up>::template __apply>::type;
+
+template <class _Tp, class _Up>
+requires requires { typename __basic_common_reference_t<_Tp, _Up>; }
+struct __common_reference_sub_bullet2<_Tp, _Up>
+{
+ using type = __basic_common_reference_t<_Tp, _Up>;
+};
+
+// sub-bullet 3 - Otherwise, if COND-RES(T1, T2) is well-formed,
+// then the member typedef `type` denotes that type.
+template <class _Tp, class _Up>
+requires requires { typename __cond_res<_Tp, _Up>; }
+struct __common_reference_sub_bullet3<_Tp, _Up>
+{
+ using type = __cond_res<_Tp, _Up>;
+};
+
+
+// sub-bullet 4 & 5 - Otherwise, if common_type_t<T1, T2> is well-formed,
+// then the member typedef `type` denotes that type.
+// - Otherwise, there shall be no member `type`.
+template <class _Tp, class _Up> struct __common_reference_sub_bullet3 : common_type<_Tp, _Up> {};
+
+// bullet 4 - If there is such a type `C`, the member typedef type shall denote the same type, if
+// any, as `common_reference_t<C, Rest...>`.
+template <class _Tp, class _Up, class _Vp, class... _Rest>
+requires requires { typename common_reference_t<_Tp, _Up>; }
+struct common_reference<_Tp, _Up, _Vp, _Rest...>
+ : common_reference<common_reference_t<_Tp, _Up>, _Vp, _Rest...>
+{};
+
+// bullet 5 - Otherwise, there shall be no member `type`.
+template <class...> struct common_reference {};
+
+#endif // _LIBCPP_STD_VER > 17
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_COMMON_REFERENCE_H
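A few well-known results of the C++20 trait implemented above, useful as a sanity check when reading the COMMON-REF rules:

#include <type_traits>

static_assert(std::is_same_v<std::common_reference_t<int&, int&>, int&>);
static_assert(std::is_same_v<std::common_reference_t<int&, const int&>, const int&>);
static_assert(std::is_same_v<std::common_reference_t<int&&, int&>, const int&>);  // binds both only as const&
static_assert(std::is_same_v<std::common_reference_t<int, int&>, int>);           // falls through to COND-RES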
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/common_type.h b/contrib/llvm-project/libcxx/include/__type_traits/common_type.h
new file mode 100644
index 000000000000..55321e9936dd
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/common_type.h
@@ -0,0 +1,138 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_COMMON_TYPE_H
+#define _LIBCPP___TYPE_TRAITS_COMMON_TYPE_H
+
+#include <__config>
+#include <__type_traits/conditional.h>
+#include <__type_traits/decay.h>
+#include <__type_traits/is_same.h>
+#include <__type_traits/remove_cvref.h>
+#include <__type_traits/void_t.h>
+#include <__utility/declval.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER > 17
+// Let COND_RES(X, Y) be:
+template <class _Tp, class _Up>
+using __cond_type = decltype(false ? declval<_Tp>() : declval<_Up>());
+
+template <class _Tp, class _Up, class = void>
+struct __common_type3 {};
+
+// sub-bullet 4 - "if COND_RES(CREF(D1), CREF(D2)) denotes a type..."
+template <class _Tp, class _Up>
+struct __common_type3<_Tp, _Up, void_t<__cond_type<const _Tp&, const _Up&>>>
+{
+ using type = remove_cvref_t<__cond_type<const _Tp&, const _Up&>>;
+};
+
+template <class _Tp, class _Up, class = void>
+struct __common_type2_imp : __common_type3<_Tp, _Up> {};
+#else
+template <class _Tp, class _Up, class = void>
+struct __common_type2_imp {};
+#endif
+
+// sub-bullet 3 - "if decay_t<decltype(false ? declval<D1>() : declval<D2>())> ..."
+template <class _Tp, class _Up>
+struct __common_type2_imp<_Tp, _Up,
+ typename __void_t<decltype(
+ true ? declval<_Tp>() : declval<_Up>()
+ )>::type>
+{
+ typedef _LIBCPP_NODEBUG typename decay<decltype(
+ true ? declval<_Tp>() : declval<_Up>()
+ )>::type type;
+};
+
+template <class, class = void>
+struct __common_type_impl {};
+
+// Clang provides variadic templates in C++03 as an extension.
+#if !defined(_LIBCPP_CXX03_LANG) || defined(__clang__)
+# define _LIBCPP_OPTIONAL_PACK(...) , __VA_ARGS__
+template <class... _Tp>
+struct __common_types;
+template <class... _Tp>
+struct _LIBCPP_TEMPLATE_VIS common_type;
+#else
+# define _LIBCPP_OPTIONAL_PACK(...)
+struct __no_arg;
+template <class _Tp, class _Up, class = __no_arg>
+struct __common_types;
+template <class _Tp = __no_arg, class _Up = __no_arg, class _Vp = __no_arg,
+ class _Unused = __no_arg>
+struct common_type {
+ static_assert(sizeof(_Unused) == 0,
+ "common_type accepts at most 3 arguments in C++03");
+};
+#endif // _LIBCPP_CXX03_LANG
+
+template <class _Tp, class _Up>
+struct __common_type_impl<
+ __common_types<_Tp, _Up>,
+ typename __void_t<typename common_type<_Tp, _Up>::type>::type>
+{
+ typedef typename common_type<_Tp, _Up>::type type;
+};
+
+template <class _Tp, class _Up, class _Vp _LIBCPP_OPTIONAL_PACK(class... _Rest)>
+struct __common_type_impl<
+ __common_types<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)>,
+ typename __void_t<typename common_type<_Tp, _Up>::type>::type>
+ : __common_type_impl<__common_types<typename common_type<_Tp, _Up>::type,
+ _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)> > {
+};
+
+// bullet 1 - sizeof...(Tp) == 0
+
+template <>
+struct _LIBCPP_TEMPLATE_VIS common_type<> {};
+
+// bullet 2 - sizeof...(Tp) == 1
+
+template <class _Tp>
+struct _LIBCPP_TEMPLATE_VIS common_type<_Tp>
+ : public common_type<_Tp, _Tp> {};
+
+// bullet 3 - sizeof...(Tp) == 2
+
+// sub-bullet 1 - "If is_same_v<T1, D1> is false or ..."
+template <class _Tp, class _Up>
+struct _LIBCPP_TEMPLATE_VIS common_type<_Tp, _Up>
+ : conditional<
+ _IsSame<_Tp, typename decay<_Tp>::type>::value && _IsSame<_Up, typename decay<_Up>::type>::value,
+ __common_type2_imp<_Tp, _Up>,
+ common_type<typename decay<_Tp>::type, typename decay<_Up>::type>
+ >::type
+{};
+
+// bullet 4 - sizeof...(Tp) > 2
+
+template <class _Tp, class _Up, class _Vp _LIBCPP_OPTIONAL_PACK(class... _Rest)>
+struct _LIBCPP_TEMPLATE_VIS
+ common_type<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)>
+ : __common_type_impl<
+ __common_types<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)> > {};
+
+#undef _LIBCPP_OPTIONAL_PACK
+
+#if _LIBCPP_STD_VER > 11
+template <class ..._Tp> using common_type_t = typename common_type<_Tp...>::type;
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_COMMON_TYPE_H
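common_type itself follows the conditional-operator rules after decaying its arguments, so references and cv-qualifiers never survive into the result. For example:

#include <type_traits>

static_assert(std::is_same_v<std::common_type_t<int, long>, long>);
static_assert(std::is_same_v<std::common_type_t<char, short>, int>);               // integral promotion
static_assert(std::is_same_v<std::common_type_t<const int&, volatile int>, int>);  // arguments are decayed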
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/copy_cv.h b/contrib/llvm-project/libcxx/include/__type_traits/copy_cv.h
new file mode 100644
index 000000000000..8e9bfe0a522f
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/copy_cv.h
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_COPY_CV_H
+#define _LIBCPP___TYPE_TRAITS_COPY_CV_H
+
+#include <__config>
+#include <__type_traits/add_const.h>
+#include <__type_traits/add_cv.h>
+#include <__type_traits/add_volatile.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// Let COPYCV(FROM, TO) be an alias for type TO with the addition of FROM's
+// top-level cv-qualifiers.
+template <class _From, class _To>
+struct __copy_cv
+{
+ using type = _To;
+};
+
+template <class _From, class _To>
+struct __copy_cv<const _From, _To>
+{
+ using type = typename add_const<_To>::type;
+};
+
+template <class _From, class _To>
+struct __copy_cv<volatile _From, _To>
+{
+ using type = typename add_volatile<_To>::type;
+};
+
+template <class _From, class _To>
+struct __copy_cv<const volatile _From, _To>
+{
+ using type = typename add_cv<_To>::type;
+};
+
+template <class _From, class _To>
+using __copy_cv_t = typename __copy_cv<_From, _To>::type;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_COPY_CV_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/copy_cvref.h b/contrib/llvm-project/libcxx/include/__type_traits/copy_cvref.h
new file mode 100644
index 000000000000..69b97ac1cb17
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/copy_cvref.h
@@ -0,0 +1,46 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_COPY_CVREF_H
+#define _LIBCPP___TYPE_TRAITS_COPY_CVREF_H
+
+#include <__config>
+#include <__type_traits/add_lvalue_reference.h>
+#include <__type_traits/add_rvalue_reference.h>
+#include <__type_traits/copy_cv.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _From, class _To>
+struct __copy_cvref
+{
+ using type = __copy_cv_t<_From, _To>;
+};
+
+template <class _From, class _To>
+struct __copy_cvref<_From&, _To>
+{
+ using type = typename add_lvalue_reference<__copy_cv_t<_From, _To> >::type;
+};
+
+template <class _From, class _To>
+struct __copy_cvref<_From&&, _To>
+{
+ using type = typename add_rvalue_reference<__copy_cv_t<_From, _To> >::type;
+};
+
+template <class _From, class _To>
+using __copy_cvref_t = typename __copy_cvref<_From, _To>::type;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_COPY_CVREF_H
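__copy_cv and __copy_cvref are internal, but the idea is easy to restate against the public API. A minimal user-level sketch (the name copy_cvref here is hypothetical, not a standard trait):

#include <type_traits>

template <class From, class To> struct copy_cvref                          { using type = To; };
template <class From, class To> struct copy_cvref<const From, To>          { using type = const To; };
template <class From, class To> struct copy_cvref<volatile From, To>       { using type = volatile To; };
template <class From, class To> struct copy_cvref<const volatile From, To> { using type = const volatile To; };
template <class From, class To> struct copy_cvref<From&, To>  { using type = typename copy_cvref<From, To>::type&; };
template <class From, class To> struct copy_cvref<From&&, To> { using type = typename copy_cvref<From, To>::type&&; };

static_assert(std::is_same_v<copy_cvref<const int&, long>::type, const long&>);
static_assert(std::is_same_v<copy_cvref<volatile int&&, long>::type, volatile long&&>);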
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/is_nothrow_convertible.h b/contrib/llvm-project/libcxx/include/__type_traits/is_nothrow_convertible.h
new file mode 100644
index 000000000000..712b6f2cf4b8
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/is_nothrow_convertible.h
@@ -0,0 +1,53 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_NOTHROW_CONVERTIBLE_H
+#define _LIBCPP___TYPE_TRAITS_IS_NOTHROW_CONVERTIBLE_H
+
+#include <__config>
+#include <__type_traits/conjunction.h>
+#include <__type_traits/disjunction.h>
+#include <__type_traits/integral_constant.h>
+#include <__type_traits/is_convertible.h>
+#include <__type_traits/is_void.h>
+#include <__type_traits/lazy.h>
+#include <__utility/declval.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER > 17
+
+template <typename _Tp>
+static void __test_noexcept(_Tp) noexcept;
+
+template<typename _Fm, typename _To>
+static bool_constant<noexcept(_VSTD::__test_noexcept<_To>(declval<_Fm>()))>
+__is_nothrow_convertible_test();
+
+template <typename _Fm, typename _To>
+struct __is_nothrow_convertible_helper: decltype(__is_nothrow_convertible_test<_Fm, _To>())
+{ };
+
+template <typename _Fm, typename _To>
+struct is_nothrow_convertible : _Or<
+ _And<is_void<_To>, is_void<_Fm>>,
+ _Lazy<_And, is_convertible<_Fm, _To>, __is_nothrow_convertible_helper<_Fm, _To>>
+>::type { };
+
+template <typename _Fm, typename _To>
+inline constexpr bool is_nothrow_convertible_v = is_nothrow_convertible<_Fm, _To>::value;
+
+#endif // _LIBCPP_STD_VER > 17
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_NOTHROW_CONVERTIBLE_H
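Unlike is_convertible, the new trait also requires that the conversion itself be non-throwing (C++20). A small example; the Loud type is made up:

#include <type_traits>

struct Loud {
    operator int() const { return 0; }            // potentially throwing conversion
    operator long() const noexcept { return 0; }  // non-throwing conversion
};

static_assert(std::is_convertible_v<Loud, int>);
static_assert(!std::is_nothrow_convertible_v<Loud, int>);
static_assert(std::is_nothrow_convertible_v<Loud, long>);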
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/is_primary_template.h b/contrib/llvm-project/libcxx/include/__type_traits/is_primary_template.h
new file mode 100644
index 000000000000..a9bebcf7e64c
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/is_primary_template.h
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_PRIMARY_TEMPLATE_H
+#define _LIBCPP___TYPE_TRAITS_IS_PRIMARY_TEMPLATE_H
+
+#include <__config>
+#include <__type_traits/enable_if.h>
+#include <__type_traits/is_same.h>
+#include <__type_traits/is_valid_expansion.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp>
+using __test_for_primary_template = __enable_if_t<
+ _IsSame<_Tp, typename _Tp::__primary_template>::value
+ >;
+template <class _Tp>
+using __is_primary_template = _IsValidExpansion<
+ __test_for_primary_template, _Tp
+ >;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_PRIMARY_TEMPLATE_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/is_signed_integer.h b/contrib/llvm-project/libcxx/include/__type_traits/is_signed_integer.h
new file mode 100644
index 000000000000..95aa11bfa1b5
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/is_signed_integer.h
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_SIGNED_INTEGER_H
+#define _LIBCPP___TYPE_TRAITS_IS_SIGNED_INTEGER_H
+
+#include <__config>
+#include <__type_traits/integral_constant.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp> struct __libcpp_is_signed_integer : public false_type {};
+template <> struct __libcpp_is_signed_integer<signed char> : public true_type {};
+template <> struct __libcpp_is_signed_integer<signed short> : public true_type {};
+template <> struct __libcpp_is_signed_integer<signed int> : public true_type {};
+template <> struct __libcpp_is_signed_integer<signed long> : public true_type {};
+template <> struct __libcpp_is_signed_integer<signed long long> : public true_type {};
+#ifndef _LIBCPP_HAS_NO_INT128
+template <> struct __libcpp_is_signed_integer<__int128_t> : public true_type {};
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_SIGNED_INTEGER_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/is_unsigned_integer.h b/contrib/llvm-project/libcxx/include/__type_traits/is_unsigned_integer.h
new file mode 100644
index 000000000000..54b29acd9ea3
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/is_unsigned_integer.h
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_UNSIGNED_INTEGER_H
+#define _LIBCPP___TYPE_TRAITS_IS_UNSIGNED_INTEGER_H
+
+#include <__config>
+#include <__type_traits/integral_constant.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp> struct __libcpp_is_unsigned_integer : public false_type {};
+template <> struct __libcpp_is_unsigned_integer<unsigned char> : public true_type {};
+template <> struct __libcpp_is_unsigned_integer<unsigned short> : public true_type {};
+template <> struct __libcpp_is_unsigned_integer<unsigned int> : public true_type {};
+template <> struct __libcpp_is_unsigned_integer<unsigned long> : public true_type {};
+template <> struct __libcpp_is_unsigned_integer<unsigned long long> : public true_type {};
+#ifndef _LIBCPP_HAS_NO_INT128
+template <> struct __libcpp_is_unsigned_integer<__uint128_t> : public true_type {};
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_UNSIGNED_INTEGER_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/is_valid_expansion.h b/contrib/llvm-project/libcxx/include/__type_traits/is_valid_expansion.h
new file mode 100644
index 000000000000..c45db7509e41
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/is_valid_expansion.h
@@ -0,0 +1,31 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_IS_VALID_EXPANSION_H
+#define _LIBCPP___TYPE_TRAITS_IS_VALID_EXPANSION_H
+
+#include <__config>
+#include <__type_traits/integral_constant.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <template <class...> class _Templ, class ..._Args, class = _Templ<_Args...> >
+true_type __sfinae_test_impl(int);
+template <template <class...> class, class ...>
+false_type __sfinae_test_impl(...);
+
+template <template <class ...> class _Templ, class ..._Args>
+using _IsValidExpansion _LIBCPP_NODEBUG = decltype(__sfinae_test_impl<_Templ, _Args...>(0));
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_IS_VALID_EXPANSION_H
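_IsValidExpansion is libc++'s spelling of the detection idiom: try to instantiate an alias and map success or failure to true_type or false_type. The same pattern written with public names (sfinae_test, value_type_of and has_value_type are hypothetical helpers):

#include <type_traits>
#include <vector>

template <template <class...> class Templ, class... Args, class = Templ<Args...>>
std::true_type sfinae_test(int);
template <template <class...> class, class...>
std::false_type sfinae_test(...);

template <class T> using value_type_of  = typename T::value_type;                 // expression under test
template <class T> using has_value_type = decltype(sfinae_test<value_type_of, T>(0));

static_assert(has_value_type<std::vector<int>>::value);
static_assert(!has_value_type<int>::value);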
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/lazy.h b/contrib/llvm-project/libcxx/include/__type_traits/lazy.h
new file mode 100644
index 000000000000..6874b06f3c5a
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/lazy.h
@@ -0,0 +1,25 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_LAZY_H
+#define _LIBCPP___TYPE_TRAITS_LAZY_H
+
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <template <class...> class _Func, class ..._Args>
+struct _Lazy : _Func<_Args...> {};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_LAZY_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/make_32_64_or_128_bit.h b/contrib/llvm-project/libcxx/include/__type_traits/make_32_64_or_128_bit.h
new file mode 100644
index 000000000000..87340eac7fb1
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/make_32_64_or_128_bit.h
@@ -0,0 +1,48 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_MAKE_32_64_OR_128_BIT_H
+#define _LIBCPP___TYPE_TRAITS_MAKE_32_64_OR_128_BIT_H
+
+#include <__config>
+#include <__type_traits/conditional.h>
+#include <__type_traits/is_same.h>
+#include <__type_traits/is_signed.h>
+#include <__type_traits/is_unsigned.h>
+#include <__type_traits/make_unsigned.h>
+#include <cstdint>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+/// Helper to promote an integral to the smallest 32-, 64-, or 128-bit representation.
+///
+/// The restriction is the same as the integral version of to_chars.
+template <class _Tp>
+#if _LIBCPP_STD_VER > 17
+ requires (is_signed_v<_Tp> || is_unsigned_v<_Tp> || is_same_v<_Tp, char>)
+#endif
+using __make_32_64_or_128_bit_t =
+ __copy_unsigned_t<_Tp,
+ __conditional_t<sizeof(_Tp) <= sizeof(int32_t), int32_t,
+ __conditional_t<sizeof(_Tp) <= sizeof(int64_t), int64_t,
+#ifndef _LIBCPP_HAS_NO_INT128
+ __conditional_t<sizeof(_Tp) <= sizeof(__int128_t), __int128_t,
+ /* else */ void>
+#else
+ /* else */ void
+#endif
+ > >
+ >;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_MAKE_32_64_OR_128_BIT_H
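The alias is consumed by <charconv> (see the include added further down) to widen small integers before formatting. A simplified user-level equivalent without the 128-bit branch; promote32_t is a made-up name, and char is deliberately left out since its signedness is implementation-defined:

#include <cstdint>
#include <type_traits>

template <class T>
using promote32_t = std::conditional_t<
    std::is_signed_v<T>,
    std::conditional_t<sizeof(T) <= sizeof(std::int32_t), std::int32_t, std::int64_t>,
    std::conditional_t<sizeof(T) <= sizeof(std::uint32_t), std::uint32_t, std::uint64_t>>;

static_assert(std::is_same_v<promote32_t<short>, std::int32_t>);
static_assert(std::is_same_v<promote32_t<unsigned char>, std::uint32_t>);
static_assert(std::is_same_v<promote32_t<long long>, std::int64_t>);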
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/make_signed.h b/contrib/llvm-project/libcxx/include/__type_traits/make_signed.h
new file mode 100644
index 000000000000..fbc31172a978
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/make_signed.h
@@ -0,0 +1,76 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_MAKE_SIGNED_H
+#define _LIBCPP___TYPE_TRAITS_MAKE_SIGNED_H
+
+#include <__config>
+#include <__type_traits/apply_cv.h>
+#include <__type_traits/is_enum.h>
+#include <__type_traits/is_integral.h>
+#include <__type_traits/nat.h>
+#include <__type_traits/remove_cv.h>
+#include <__type_traits/type_list.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+typedef
+ __type_list<signed char,
+ __type_list<signed short,
+ __type_list<signed int,
+ __type_list<signed long,
+ __type_list<signed long long,
+#ifndef _LIBCPP_HAS_NO_INT128
+ __type_list<__int128_t,
+#endif
+ __nat
+#ifndef _LIBCPP_HAS_NO_INT128
+ >
+#endif
+ > > > > > __signed_types;
+
+template <class _Tp, bool = is_integral<_Tp>::value || is_enum<_Tp>::value>
+struct __make_signed {};
+
+template <class _Tp>
+struct __make_signed<_Tp, true>
+{
+ typedef typename __find_first<__signed_types, sizeof(_Tp)>::type type;
+};
+
+template <> struct __make_signed<bool, true> {};
+template <> struct __make_signed< signed short, true> {typedef short type;};
+template <> struct __make_signed<unsigned short, true> {typedef short type;};
+template <> struct __make_signed< signed int, true> {typedef int type;};
+template <> struct __make_signed<unsigned int, true> {typedef int type;};
+template <> struct __make_signed< signed long, true> {typedef long type;};
+template <> struct __make_signed<unsigned long, true> {typedef long type;};
+template <> struct __make_signed< signed long long, true> {typedef long long type;};
+template <> struct __make_signed<unsigned long long, true> {typedef long long type;};
+#ifndef _LIBCPP_HAS_NO_INT128
+template <> struct __make_signed<__int128_t, true> {typedef __int128_t type;};
+template <> struct __make_signed<__uint128_t, true> {typedef __int128_t type;};
+#endif
+
+template <class _Tp>
+struct _LIBCPP_TEMPLATE_VIS make_signed
+{
+ typedef typename __apply_cv<_Tp, typename __make_signed<typename remove_cv<_Tp>::type>::type>::type type;
+};
+
+#if _LIBCPP_STD_VER > 11
+template <class _Tp> using make_signed_t = typename make_signed<_Tp>::type;
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_MAKE_SIGNED_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/make_unsigned.h b/contrib/llvm-project/libcxx/include/__type_traits/make_unsigned.h
new file mode 100644
index 000000000000..8110a5ca9609
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/make_unsigned.h
@@ -0,0 +1,89 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_MAKE_UNSIGNED_H
+#define _LIBCPP___TYPE_TRAITS_MAKE_UNSIGNED_H
+
+#include <__config>
+#include <__type_traits/apply_cv.h>
+#include <__type_traits/conditional.h>
+#include <__type_traits/is_enum.h>
+#include <__type_traits/is_integral.h>
+#include <__type_traits/is_unsigned.h>
+#include <__type_traits/nat.h>
+#include <__type_traits/remove_cv.h>
+#include <__type_traits/type_list.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+typedef
+ __type_list<unsigned char,
+ __type_list<unsigned short,
+ __type_list<unsigned int,
+ __type_list<unsigned long,
+ __type_list<unsigned long long,
+#ifndef _LIBCPP_HAS_NO_INT128
+ __type_list<__uint128_t,
+#endif
+ __nat
+#ifndef _LIBCPP_HAS_NO_INT128
+ >
+#endif
+ > > > > > __unsigned_types;
+
+template <class _Tp, bool = is_integral<_Tp>::value || is_enum<_Tp>::value>
+struct __make_unsigned {};
+
+template <class _Tp>
+struct __make_unsigned<_Tp, true>
+{
+ typedef typename __find_first<__unsigned_types, sizeof(_Tp)>::type type;
+};
+
+template <> struct __make_unsigned<bool, true> {};
+template <> struct __make_unsigned< signed short, true> {typedef unsigned short type;};
+template <> struct __make_unsigned<unsigned short, true> {typedef unsigned short type;};
+template <> struct __make_unsigned< signed int, true> {typedef unsigned int type;};
+template <> struct __make_unsigned<unsigned int, true> {typedef unsigned int type;};
+template <> struct __make_unsigned< signed long, true> {typedef unsigned long type;};
+template <> struct __make_unsigned<unsigned long, true> {typedef unsigned long type;};
+template <> struct __make_unsigned< signed long long, true> {typedef unsigned long long type;};
+template <> struct __make_unsigned<unsigned long long, true> {typedef unsigned long long type;};
+#ifndef _LIBCPP_HAS_NO_INT128
+template <> struct __make_unsigned<__int128_t, true> {typedef __uint128_t type;};
+template <> struct __make_unsigned<__uint128_t, true> {typedef __uint128_t type;};
+#endif
+
+template <class _Tp>
+struct _LIBCPP_TEMPLATE_VIS make_unsigned
+{
+ typedef typename __apply_cv<_Tp, typename __make_unsigned<typename remove_cv<_Tp>::type>::type>::type type;
+};
+
+#if _LIBCPP_STD_VER > 11
+template <class _Tp> using make_unsigned_t = typename make_unsigned<_Tp>::type;
+#endif
+
+#ifndef _LIBCPP_CXX03_LANG
+template <class _Tp>
+_LIBCPP_HIDE_FROM_ABI constexpr
+typename make_unsigned<_Tp>::type __to_unsigned_like(_Tp __x) noexcept {
+ return static_cast<typename make_unsigned<_Tp>::type>(__x);
+}
+#endif
+
+template <class _Tp, class _Up>
+using __copy_unsigned_t = __conditional_t<is_unsigned<_Tp>::value, typename make_unsigned<_Up>::type, _Up>;
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_MAKE_UNSIGNED_H
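Both make_signed and make_unsigned map integral and enumeration types to the corresponding signed or unsigned integer type of the same size, preserving cv-qualifiers; the Color enum below is just an example:

#include <type_traits>

enum class Color : unsigned char { red, green };

static_assert(std::is_same_v<std::make_unsigned_t<int>, unsigned int>);
static_assert(std::is_same_v<std::make_signed_t<unsigned long>, long>);
static_assert(std::is_same_v<std::make_unsigned_t<const int>, const unsigned int>);
static_assert(sizeof(std::make_unsigned_t<Color>) == sizeof(unsigned char));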
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/nat.h b/contrib/llvm-project/libcxx/include/__type_traits/nat.h
new file mode 100644
index 000000000000..5216ef520420
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/nat.h
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_NAT_H
+#define _LIBCPP___TYPE_TRAITS_NAT_H
+
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+struct __nat
+{
+#ifndef _LIBCPP_CXX03_LANG
+ __nat() = delete;
+ __nat(const __nat&) = delete;
+ __nat& operator=(const __nat&) = delete;
+ ~__nat() = delete;
+#endif
+};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_NAT_H
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/promote.h b/contrib/llvm-project/libcxx/include/__type_traits/promote.h
new file mode 100644
index 000000000000..a9226a74b300
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/promote.h
@@ -0,0 +1,95 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_PROMOTE_H
+#define _LIBCPP___TYPE_TRAITS_PROMOTE_H
+
+#include <__config>
+#include <__type_traits/integral_constant.h>
+#include <__type_traits/is_same.h>
+#include <__utility/declval.h>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp>
+struct __numeric_type
+{
+ static void __test(...);
+ static float __test(float);
+ static double __test(char);
+ static double __test(int);
+ static double __test(unsigned);
+ static double __test(long);
+ static double __test(unsigned long);
+ static double __test(long long);
+ static double __test(unsigned long long);
+ static double __test(double);
+ static long double __test(long double);
+
+ typedef decltype(__test(declval<_Tp>())) type;
+ static const bool value = _IsNotSame<type, void>::value;
+};
+
+template <>
+struct __numeric_type<void>
+{
+ static const bool value = true;
+};
+
+template <class _A1, class _A2 = void, class _A3 = void,
+ bool = __numeric_type<_A1>::value &&
+ __numeric_type<_A2>::value &&
+ __numeric_type<_A3>::value>
+class __promote_imp
+{
+public:
+ static const bool value = false;
+};
+
+template <class _A1, class _A2, class _A3>
+class __promote_imp<_A1, _A2, _A3, true>
+{
+private:
+ typedef typename __promote_imp<_A1>::type __type1;
+ typedef typename __promote_imp<_A2>::type __type2;
+ typedef typename __promote_imp<_A3>::type __type3;
+public:
+ typedef decltype(__type1() + __type2() + __type3()) type;
+ static const bool value = true;
+};
+
+template <class _A1, class _A2>
+class __promote_imp<_A1, _A2, void, true>
+{
+private:
+ typedef typename __promote_imp<_A1>::type __type1;
+ typedef typename __promote_imp<_A2>::type __type2;
+public:
+ typedef decltype(__type1() + __type2()) type;
+ static const bool value = true;
+};
+
+template <class _A1>
+class __promote_imp<_A1, void, void, true>
+{
+public:
+ typedef typename __numeric_type<_A1>::type type;
+ static const bool value = true;
+};
+
+template <class _A1, class _A2 = void, class _A3 = void>
+class __promote : public __promote_imp<_A1, _A2, _A3> {};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_PROMOTE_H
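__promote is what the mixed-argument <cmath> overloads use to pick their return type: integral arguments are treated as double and the result is the widest floating-point type involved. For instance:

#include <cmath>
#include <type_traits>

static_assert(std::is_same_v<decltype(std::pow(2, 3.5)), double>);           // int, double        -> double
static_assert(std::is_same_v<decltype(std::pow(2.0f, 2.0f)), float>);        // float, float       -> float
static_assert(std::is_same_v<decltype(std::pow(2.0f, 2.0L)), long double>);  // float, long double -> long double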
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/remove_cvref.h b/contrib/llvm-project/libcxx/include/__type_traits/remove_cvref.h
new file mode 100644
index 000000000000..d937501fedce
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/remove_cvref.h
@@ -0,0 +1,41 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_REMOVE_CVREF_H
+#define _LIBCPP___TYPE_TRAITS_REMOVE_CVREF_H
+
+#include <__config>
+#include <__type_traits/is_same.h>
+#include <__type_traits/remove_cv.h>
+#include <__type_traits/remove_reference.h>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Tp>
+using __uncvref_t _LIBCPP_NODEBUG = typename remove_cv<typename remove_reference<_Tp>::type>::type;
+
+template <class _Tp, class _Up>
+struct __is_same_uncvref : _IsSame<__uncvref_t<_Tp>, __uncvref_t<_Up> > {};
+
+#if _LIBCPP_STD_VER > 17
+// remove_cvref - same as __uncvref
+template <class _Tp>
+struct remove_cvref {
+ using type _LIBCPP_NODEBUG = __uncvref_t<_Tp>;
+};
+
+template <class _Tp> using remove_cvref_t = typename remove_cvref<_Tp>::type;
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_REMOVE_CVREF_H
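remove_cvref_t (C++20) strips the reference and then the top-level cv-qualifiers, exactly what the internal __uncvref_t above has always done:

#include <type_traits>

static_assert(std::is_same_v<std::remove_cvref_t<const int&>, int>);
static_assert(std::is_same_v<std::remove_cvref_t<volatile int&&>, int>);
static_assert(std::is_same_v<std::remove_cvref_t<const char* const&>, const char*>);  // pointee const is kept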
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/type_list.h b/contrib/llvm-project/libcxx/include/__type_traits/type_list.h
new file mode 100644
index 000000000000..5a9e3319a1d4
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/__type_traits/type_list.h
@@ -0,0 +1,44 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___TYPE_TRAITS_TYPE_LIST_H
+#define _LIBCPP___TYPE_TRAITS_TYPE_LIST_H
+
+#include <__config>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+template <class _Hp, class _Tp>
+struct __type_list
+{
+ typedef _Hp _Head;
+ typedef _Tp _Tail;
+};
+
+template <class _TypeList, size_t _Size, bool = _Size <= sizeof(typename _TypeList::_Head)> struct __find_first;
+
+template <class _Hp, class _Tp, size_t _Size>
+struct __find_first<__type_list<_Hp, _Tp>, _Size, true>
+{
+ typedef _LIBCPP_NODEBUG _Hp type;
+};
+
+template <class _Hp, class _Tp, size_t _Size>
+struct __find_first<__type_list<_Hp, _Tp>, _Size, false>
+{
+ typedef _LIBCPP_NODEBUG typename __find_first<_Tp, _Size>::type type;
+};
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___TYPE_TRAITS_TYPE_LIST_H
diff --git a/contrib/llvm-project/libcxx/include/__utility/transaction.h b/contrib/llvm-project/libcxx/include/__utility/transaction.h
index 87e51c0b198e..e2cc43820845 100644
--- a/contrib/llvm-project/libcxx/include/__utility/transaction.h
+++ b/contrib/llvm-project/libcxx/include/__utility/transaction.h
@@ -86,6 +86,11 @@ private:
bool __completed_;
};
+template <class _Rollback>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __transaction<_Rollback> __make_transaction(_Rollback __rollback) {
+ return __transaction<_Rollback>(std::move(__rollback));
+}
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___UTILITY_TRANSACTION_H
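The added __make_transaction is a factory for libc++'s internal rollback guard: run a cleanup callable on scope exit unless the operation is marked complete. A minimal user-level sketch of the same pattern, with made-up names rollback_guard and push_two:

#include <utility>
#include <vector>

template <class Rollback>
struct rollback_guard {
    explicit rollback_guard(Rollback r) : rollback_(std::move(r)) {}
    ~rollback_guard() { if (!completed_) rollback_(); }  // roll back unless committed
    void complete() { completed_ = true; }                // commit: skip the rollback
private:
    Rollback rollback_;
    bool completed_ = false;
};

void push_two(std::vector<int>& a, std::vector<int>& b) {
    a.push_back(1);
    auto guard = rollback_guard([&] { a.pop_back(); });   // C++17 CTAD deduces the lambda type
    b.push_back(2);                                       // may throw
    guard.complete();                                     // both pushes succeeded: keep them
}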
diff --git a/contrib/llvm-project/libcxx/include/algorithm b/contrib/llvm-project/libcxx/include/algorithm
index 5958ad1a95af..197b3b1043bb 100644
--- a/contrib/llvm-project/libcxx/include/algorithm
+++ b/contrib/llvm-project/libcxx/include/algorithm
@@ -331,6 +331,23 @@ namespace ranges {
constexpr borrowed_iterator_t<R>
ranges::sort_heap(R&& r, Comp comp = {}, Proj proj = {}); // since C++20
+ template<random_access_iterator I, sentinel_for<I> S, class Proj = identity,
+ indirect_strict_weak_order<projected<I, Proj>> Comp = ranges::less>
+ constexpr bool is_heap(I first, S last, Comp comp = {}, Proj proj = {}); // Since C++20
+
+ template<random_access_range R, class Proj = identity,
+ indirect_strict_weak_order<projected<iterator_t<R>, Proj>> Comp = ranges::less>
+ constexpr bool is_heap(R&& r, Comp comp = {}, Proj proj = {}); // Since C++20
+
+ template<random_access_iterator I, sentinel_for<I> S, class Proj = identity,
+ indirect_strict_weak_order<projected<I, Proj>> Comp = ranges::less>
+ constexpr I is_heap_until(I first, S last, Comp comp = {}, Proj proj = {}); // Since C++20
+
+ template<random_access_range R, class Proj = identity,
+ indirect_strict_weak_order<projected<iterator_t<R>, Proj>> Comp = ranges::less>
+ constexpr borrowed_iterator_t<R>
+ is_heap_until(R&& r, Comp comp = {}, Proj proj = {}); // Since C++20
+
template<bidirectional_iterator I, sentinel_for<I> S>
requires permutable<I>
constexpr I ranges::reverse(I first, S last); // since C++20
@@ -380,6 +397,18 @@ namespace ranges {
template<class T, output_iterator<const T&> O>
constexpr O ranges::fill_n(O first, iter_difference_t<O> n, const T& value); // since C++20
+ template<input_or_output_iterator O, sentinel_for<O> S, copy_constructible F>
+ requires invocable<F&> && indirectly_writable<O, invoke_result_t<F&>>
+ constexpr O generate(O first, S last, F gen); // Since C++20
+
+ template<class R, copy_constructible F>
+ requires invocable<F&> && output_range<R, invoke_result_t<F&>>
+ constexpr borrowed_iterator_t<R> generate(R&& r, F gen); // Since C++20
+
+ template<input_or_output_iterator O, copy_constructible F>
+ requires invocable<F&> && indirectly_writable<O, invoke_result_t<F&>>
+ constexpr O generate_n(O first, iter_difference_t<O> n, F gen); // Since C++20
+
template<input_iterator I1, sentinel_for<I1> S1, input_iterator I2, sentinel_for<I2> S2,
class Pred = ranges::equal_to, class Proj1 = identity, class Proj2 = identity>
requires indirectly_comparable<I1, I2, Pred, Proj1, Proj2>
@@ -1575,7 +1604,11 @@ template <class BidirectionalIterator, class Compare>
#include <__algorithm/ranges_find_if_not.h>
#include <__algorithm/ranges_for_each.h>
#include <__algorithm/ranges_for_each_n.h>
+#include <__algorithm/ranges_generate.h>
+#include <__algorithm/ranges_generate_n.h>
#include <__algorithm/ranges_includes.h>
+#include <__algorithm/ranges_is_heap.h>
+#include <__algorithm/ranges_is_heap_until.h>
#include <__algorithm/ranges_is_partitioned.h>
#include <__algorithm/ranges_is_sorted.h>
#include <__algorithm/ranges_is_sorted_until.h>
diff --git a/contrib/llvm-project/libcxx/include/charconv b/contrib/llvm-project/libcxx/include/charconv
index 9f474ae711f3..4f00755a83bb 100644
--- a/contrib/llvm-project/libcxx/include/charconv
+++ b/contrib/llvm-project/libcxx/include/charconv
@@ -88,6 +88,7 @@ namespace std {
#include <__config>
#include <__debug>
#include <__errc>
+#include <__type_traits/make_32_64_or_128_bit.h>
#include <__utility/unreachable.h>
#include <cmath> // for log2f
#include <cstdint>
diff --git a/contrib/llvm-project/libcxx/include/forward_list b/contrib/llvm-project/libcxx/include/forward_list
index aab3b8715d01..ee90aaa4771f 100644
--- a/contrib/llvm-project/libcxx/include/forward_list
+++ b/contrib/llvm-project/libcxx/include/forward_list
@@ -188,6 +188,7 @@ template <class T, class Allocator, class Predicate>
#include <__iterator/iterator_traits.h>
#include <__iterator/move_iterator.h>
#include <__iterator/next.h>
+#include <__memory/swap_allocator.h>
#include <__utility/forward.h>
#include <limits>
#include <memory>
diff --git a/contrib/llvm-project/libcxx/include/list b/contrib/llvm-project/libcxx/include/list
index 1db29d14b842..5fcbd67c6703 100644
--- a/contrib/llvm-project/libcxx/include/list
+++ b/contrib/llvm-project/libcxx/include/list
@@ -194,6 +194,7 @@ template <class T, class Allocator, class Predicate>
#include <__iterator/next.h>
#include <__iterator/prev.h>
#include <__iterator/reverse_iterator.h>
+#include <__memory/swap_allocator.h>
#include <__utility/forward.h>
#include <__utility/move.h>
#include <__utility/swap.h>
diff --git a/contrib/llvm-project/libcxx/include/math.h b/contrib/llvm-project/libcxx/include/math.h
index 7d553e728d0f..0ec584af9dbe 100644
--- a/contrib/llvm-project/libcxx/include/math.h
+++ b/contrib/llvm-project/libcxx/include/math.h
@@ -305,6 +305,7 @@ long double truncl(long double x);
// back to C++ linkage before including these C++ headers.
extern "C++" {
+#include <__type_traits/promote.h>
#include <limits>
#include <stdlib.h>
#include <type_traits>
diff --git a/contrib/llvm-project/libcxx/include/memory b/contrib/llvm-project/libcxx/include/memory
index ec9f5773929f..56f8159fbd44 100644
--- a/contrib/llvm-project/libcxx/include/memory
+++ b/contrib/llvm-project/libcxx/include/memory
@@ -885,93 +885,6 @@ template<size_t N, class T>
_LIBCPP_BEGIN_NAMESPACE_STD
-template <class _Alloc, class _Ptr>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_forward_with_exception_guarantees(_Alloc& __a, _Ptr __begin1, _Ptr __end1, _Ptr& __begin2) {
- static_assert(__is_cpp17_move_insertable<_Alloc>::value,
- "The specified type does not meet the requirements of Cpp17MoveInsertable");
- typedef allocator_traits<_Alloc> _Traits;
- for (; __begin1 != __end1; ++__begin1, (void)++__begin2) {
- _Traits::construct(__a, _VSTD::__to_address(__begin2),
-#ifdef _LIBCPP_NO_EXCEPTIONS
- _VSTD::move(*__begin1)
-#else
- _VSTD::move_if_noexcept(*__begin1)
-#endif
- );
- }
-}
-
-template <class _Alloc, class _Tp, typename enable_if<
- (__is_default_allocator<_Alloc>::value || !__has_construct<_Alloc, _Tp*, _Tp>::value) &&
- is_trivially_move_constructible<_Tp>::value
->::type>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_forward_with_exception_guarantees(_Alloc&, _Tp* __begin1, _Tp* __end1, _Tp*& __begin2) {
- ptrdiff_t _Np = __end1 - __begin1;
- if (_Np > 0) {
- _VSTD::memcpy(__begin2, __begin1, _Np * sizeof(_Tp));
- __begin2 += _Np;
- }
-}
-
-template <class _Alloc, class _Iter, class _Ptr>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_range_forward(_Alloc& __a, _Iter __begin1, _Iter __end1, _Ptr& __begin2) {
- typedef allocator_traits<_Alloc> _Traits;
- for (; __begin1 != __end1; ++__begin1, (void) ++__begin2) {
- _Traits::construct(__a, _VSTD::__to_address(__begin2), *__begin1);
- }
-}
-
-template <class _Alloc, class _Source, class _Dest,
- class _RawSource = typename remove_const<_Source>::type,
- class _RawDest = typename remove_const<_Dest>::type,
- class =
- typename enable_if<
- is_trivially_copy_constructible<_Dest>::value &&
- is_same<_RawSource, _RawDest>::value &&
- (__is_default_allocator<_Alloc>::value || !__has_construct<_Alloc, _Dest*, _Source&>::value)
- >::type>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_range_forward(_Alloc&, _Source* __begin1, _Source* __end1, _Dest*& __begin2) {
- ptrdiff_t _Np = __end1 - __begin1;
- if (_Np > 0) {
- _VSTD::memcpy(const_cast<_RawDest*>(__begin2), __begin1, _Np * sizeof(_Dest));
- __begin2 += _Np;
- }
-}
-
-template <class _Alloc, class _Ptr>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_backward_with_exception_guarantees(_Alloc& __a, _Ptr __begin1, _Ptr __end1, _Ptr& __end2) {
- static_assert(__is_cpp17_move_insertable<_Alloc>::value,
- "The specified type does not meet the requirements of Cpp17MoveInsertable");
- typedef allocator_traits<_Alloc> _Traits;
- while (__end1 != __begin1) {
- _Traits::construct(__a, _VSTD::__to_address(__end2 - 1),
-#ifdef _LIBCPP_NO_EXCEPTIONS
- _VSTD::move(*--__end1)
-#else
- _VSTD::move_if_noexcept(*--__end1)
-#endif
- );
- --__end2;
- }
-}
-
-template <class _Alloc, class _Tp, class = typename enable_if<
- (__is_default_allocator<_Alloc>::value || !__has_construct<_Alloc, _Tp*, _Tp>::value) &&
- is_trivially_move_constructible<_Tp>::value
->::type>
-_LIBCPP_INLINE_VISIBILITY
-void __construct_backward_with_exception_guarantees(_Alloc&, _Tp* __begin1, _Tp* __end1, _Tp*& __end2) {
- ptrdiff_t _Np = __end1 - __begin1;
- __end2 -= _Np;
- if (_Np > 0)
- _VSTD::memcpy(static_cast<void*>(__end2), static_cast<void const*>(__begin1), _Np * sizeof(_Tp));
-}
-
struct __destruct_n
{
private:
@@ -1013,37 +926,6 @@ public:
_LIBCPP_FUNC_VIS void* align(size_t __align, size_t __sz, void*& __ptr, size_t& __space);
-// --- Helper for container swap --
-template <typename _Alloc>
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-void __swap_allocator(_Alloc & __a1, _Alloc & __a2, true_type)
-#if _LIBCPP_STD_VER > 11
- _NOEXCEPT
-#else
- _NOEXCEPT_(__is_nothrow_swappable<_Alloc>::value)
-#endif
-{
- using _VSTD::swap;
- swap(__a1, __a2);
-}
-
-template <typename _Alloc>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-void __swap_allocator(_Alloc &, _Alloc &, false_type) _NOEXCEPT {}
-
-template <typename _Alloc>
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_AFTER_CXX11
-void __swap_allocator(_Alloc & __a1, _Alloc & __a2)
-#if _LIBCPP_STD_VER > 11
- _NOEXCEPT
-#else
- _NOEXCEPT_(__is_nothrow_swappable<_Alloc>::value)
-#endif
-{
- _VSTD::__swap_allocator(__a1, __a2,
- integral_constant<bool, allocator_traits<_Alloc>::propagate_on_container_swap::value>());
-}
-
template <typename _Alloc, typename _Traits=allocator_traits<_Alloc> >
struct __noexcept_move_assign_container : public integral_constant<bool,
_Traits::propagate_on_container_move_assignment::value
diff --git a/contrib/llvm-project/libcxx/include/module.modulemap.in b/contrib/llvm-project/libcxx/include/module.modulemap.in
index cbf0b4f7f16d..98485bcd93ab 100644
--- a/contrib/llvm-project/libcxx/include/module.modulemap.in
+++ b/contrib/llvm-project/libcxx/include/module.modulemap.in
@@ -832,6 +832,7 @@ module std [system] {
module ranges_uninitialized_algorithms { private header "__memory/ranges_uninitialized_algorithms.h" }
module raw_storage_iterator { private header "__memory/raw_storage_iterator.h" }
module shared_ptr { private header "__memory/shared_ptr.h" }
+ module swap_allocator { private header "__memory/swap_allocator.h" }
module temporary_buffer { private header "__memory/temporary_buffer.h" }
module uninitialized_algorithms { private header "__memory/uninitialized_algorithms.h" }
module unique_ptr { private header "__memory/unique_ptr.h" }
@@ -1090,10 +1091,16 @@ module std [system] {
module add_pointer { private header "__type_traits/add_pointer.h" }
module add_rvalue_reference { private header "__type_traits/add_rvalue_reference.h" }
module add_volatile { private header "__type_traits/add_volatile.h" }
+ module aligned_storage { private header "__type_traits/aligned_storage.h" }
+ module aligned_union { private header "__type_traits/aligned_union.h" }
module alignment_of { private header "__type_traits/alignment_of.h" }
module apply_cv { private header "__type_traits/apply_cv.h" }
+ module common_reference { private header "__type_traits/common_reference.h" }
+ module common_type { private header "__type_traits/common_type.h" }
module conditional { private header "__type_traits/conditional.h" }
module conjunction { private header "__type_traits/conjunction.h" }
+ module copy_cv { private header "__type_traits/copy_cv.h" }
+ module copy_cvref { private header "__type_traits/copy_cvref.h" }
module decay { private header "__type_traits/decay.h" }
module disjunction { private header "__type_traits/disjunction.h" }
module enable_if { private header "__type_traits/enable_if.h" }
@@ -1135,6 +1142,7 @@ module std [system] {
module is_move_constructible { private header "__type_traits/is_move_constructible.h" }
module is_nothrow_assignable { private header "__type_traits/is_nothrow_assignable.h" }
module is_nothrow_constructible { private header "__type_traits/is_nothrow_constructible.h" }
+ module is_nothrow_convertible { private header "__type_traits/is_nothrow_convertible.h" }
module is_nothrow_copy_assignable { private header "__type_traits/is_nothrow_copy_assignable.h" }
module is_nothrow_copy_constructible { private header "__type_traits/is_nothrow_copy_constructible.h" }
module is_nothrow_default_constructible { private header "__type_traits/is_nothrow_default_constructible.h" }
@@ -1146,6 +1154,7 @@ module std [system] {
module is_pod { private header "__type_traits/is_pod.h" }
module is_pointer { private header "__type_traits/is_pointer.h" }
module is_polymorphic { private header "__type_traits/is_polymorphic.h" }
+ module is_primary_template { private header "__type_traits/is_primary_template.h" }
module is_reference { private header "__type_traits/is_reference.h" }
module is_reference_wrapper { private header "__type_traits/is_reference_wrapper.h" }
module is_referenceable { private header "__type_traits/is_referenceable.h" }
@@ -1153,6 +1162,7 @@ module std [system] {
module is_scalar { private header "__type_traits/is_scalar.h" }
module is_scoped_enum { private header "__type_traits/is_scoped_enum.h" }
module is_signed { private header "__type_traits/is_signed.h" }
+ module is_signed_integer { private header "__type_traits/is_signed_integer.h" }
module is_standard_layout { private header "__type_traits/is_standard_layout.h" }
module is_trivial { private header "__type_traits/is_trivial.h" }
module is_trivially_assignable { private header "__type_traits/is_trivially_assignable.h" }
@@ -1167,18 +1177,28 @@ module std [system] {
module is_unbounded_array { private header "__type_traits/is_unbounded_array.h" }
module is_union { private header "__type_traits/is_union.h" }
module is_unsigned { private header "__type_traits/is_unsigned.h" }
+ module is_unsigned_integer { private header "__type_traits/is_unsigned_integer.h" }
+ module is_valid_expansion { private header "__type_traits/is_valid_expansion.h" }
module is_void { private header "__type_traits/is_void.h" }
module is_volatile { private header "__type_traits/is_volatile.h" }
+ module lazy { private header "__type_traits/lazy.h" }
+ module make_32_64_or_128_bit { private header "__type_traits/make_32_64_or_128_bit.h" }
+ module make_signed { private header "__type_traits/make_signed.h" }
+ module make_unsigned { private header "__type_traits/make_unsigned.h" }
+ module nat { private header "__type_traits/nat.h" }
module negation { private header "__type_traits/negation.h" }
+ module promote { private header "__type_traits/promote.h" }
module rank { private header "__type_traits/rank.h" }
module remove_all_extents { private header "__type_traits/remove_all_extents.h" }
module remove_const { private header "__type_traits/remove_const.h" }
module remove_cv { private header "__type_traits/remove_cv.h" }
+ module remove_cvref { private header "__type_traits/remove_cvref.h" }
module remove_extent { private header "__type_traits/remove_extent.h" }
module remove_pointer { private header "__type_traits/remove_pointer.h" }
module remove_reference { private header "__type_traits/remove_reference.h" }
module remove_volatile { private header "__type_traits/remove_volatile.h" }
module type_identity { private header "__type_traits/type_identity.h" }
+ module type_list { private header "__type_traits/type_list.h" }
module underlying_type { private header "__type_traits/underlying_type.h" }
module void_t { private header "__type_traits/void_t.h" }
}
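
The private headers registered above back standard traits that are observable from user code. A minimal sketch, assuming a C++20 compiler and libc++, exercising a few of the newly split-out facilities; everything here is plain <type_traits>, nothing libc++-internal:

#include <type_traits>

static_assert(std::is_same_v<std::remove_cvref_t<const int&>, int>);
static_assert(std::is_same_v<std::common_type_t<int, long>, long>);
static_assert(std::is_same_v<std::common_reference_t<int&, const int&>, const int&>);
static_assert(std::is_nothrow_convertible_v<int, long>);
static_assert(std::is_same_v<std::make_unsigned_t<int>, unsigned int>);
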
diff --git a/contrib/llvm-project/libcxx/include/string b/contrib/llvm-project/libcxx/include/string
index bb169a82c9e7..3723dc8a3938 100644
--- a/contrib/llvm-project/libcxx/include/string
+++ b/contrib/llvm-project/libcxx/include/string
@@ -532,6 +532,7 @@ basic_string<char32_t> operator "" s( const char32_t *str, size_t len );
#include <__iterator/reverse_iterator.h>
#include <__iterator/wrap_iter.h>
#include <__memory/allocate_at_least.h>
+#include <__memory/swap_allocator.h>
#include <__string/char_traits.h>
#include <__string/extern_template_lists.h>
#include <__utility/auto_cast.h>
diff --git a/contrib/llvm-project/libcxx/include/type_traits b/contrib/llvm-project/libcxx/include/type_traits
index f7d81e65dd8e..3a086c595f92 100644
--- a/contrib/llvm-project/libcxx/include/type_traits
+++ b/contrib/llvm-project/libcxx/include/type_traits
@@ -425,8 +425,12 @@ namespace std
#include <__type_traits/add_pointer.h>
#include <__type_traits/add_rvalue_reference.h>
#include <__type_traits/add_volatile.h>
+#include <__type_traits/aligned_storage.h>
+#include <__type_traits/aligned_union.h>
#include <__type_traits/alignment_of.h>
#include <__type_traits/apply_cv.h>
+#include <__type_traits/common_reference.h>
+#include <__type_traits/common_type.h>
#include <__type_traits/conditional.h>
#include <__type_traits/conjunction.h>
#include <__type_traits/decay.h>
@@ -469,6 +473,7 @@ namespace std
#include <__type_traits/is_move_constructible.h>
#include <__type_traits/is_nothrow_assignable.h>
#include <__type_traits/is_nothrow_constructible.h>
+#include <__type_traits/is_nothrow_convertible.h>
#include <__type_traits/is_nothrow_copy_assignable.h>
#include <__type_traits/is_nothrow_copy_constructible.h>
#include <__type_traits/is_nothrow_default_constructible.h>
@@ -503,6 +508,8 @@ namespace std
#include <__type_traits/is_unsigned.h>
#include <__type_traits/is_void.h>
#include <__type_traits/is_volatile.h>
+#include <__type_traits/make_signed.h>
+#include <__type_traits/make_unsigned.h>
#include <__type_traits/negation.h>
#include <__type_traits/rank.h>
#include <__type_traits/remove_all_extents.h>
@@ -529,798 +536,18 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _T1, class _T2> struct _LIBCPP_TEMPLATE_VIS pair;
template <class _Tp> struct _LIBCPP_TEMPLATE_VIS hash;
-template <template <class...> class _Func, class ..._Args>
-struct _Lazy : _Func<_Args...> {};
-
// Member detector base
-template <template <class...> class _Templ, class ..._Args, class = _Templ<_Args...> >
-true_type __sfinae_test_impl(int);
-template <template <class...> class, class ...>
-false_type __sfinae_test_impl(...);
-
-template <template <class ...> class _Templ, class ..._Args>
-using _IsValidExpansion _LIBCPP_NODEBUG = decltype(__sfinae_test_impl<_Templ, _Args...>(0));
-
template <class _Tp, bool>
struct _LIBCPP_TEMPLATE_VIS __dependent_type : public _Tp {};
-// is_same
-
-template <class _Tp>
-using __test_for_primary_template = __enable_if_t<
- _IsSame<_Tp, typename _Tp::__primary_template>::value
- >;
-template <class _Tp>
-using __is_primary_template = _IsValidExpansion<
- __test_for_primary_template, _Tp
- >;
-
// is_integral
-// [basic.fundamental] defines five standard signed integer types;
-// __int128_t is an extended signed integer type.
-// The signed and unsigned integer types, plus bool and the
-// five types with "char" in their name, compose the "integral" types.
-
-template <class _Tp> struct __libcpp_is_signed_integer : public false_type {};
-template <> struct __libcpp_is_signed_integer<signed char> : public true_type {};
-template <> struct __libcpp_is_signed_integer<signed short> : public true_type {};
-template <> struct __libcpp_is_signed_integer<signed int> : public true_type {};
-template <> struct __libcpp_is_signed_integer<signed long> : public true_type {};
-template <> struct __libcpp_is_signed_integer<signed long long> : public true_type {};
-#ifndef _LIBCPP_HAS_NO_INT128
-template <> struct __libcpp_is_signed_integer<__int128_t> : public true_type {};
-#endif
-
-template <class _Tp> struct __libcpp_is_unsigned_integer : public false_type {};
-template <> struct __libcpp_is_unsigned_integer<unsigned char> : public true_type {};
-template <> struct __libcpp_is_unsigned_integer<unsigned short> : public true_type {};
-template <> struct __libcpp_is_unsigned_integer<unsigned int> : public true_type {};
-template <> struct __libcpp_is_unsigned_integer<unsigned long> : public true_type {};
-template <> struct __libcpp_is_unsigned_integer<unsigned long long> : public true_type {};
-#ifndef _LIBCPP_HAS_NO_INT128
-template <> struct __libcpp_is_unsigned_integer<__uint128_t> : public true_type {};
-#endif
-
template <class _Tp>
struct __unconstref {
typedef _LIBCPP_NODEBUG typename remove_const<typename remove_reference<_Tp>::type>::type type;
};
-template <class _Tp>
-using __uncvref_t _LIBCPP_NODEBUG = typename remove_cv<typename remove_reference<_Tp>::type>::type;
-
-// __is_same_uncvref
-
-template <class _Tp, class _Up>
-struct __is_same_uncvref : _IsSame<__uncvref_t<_Tp>, __uncvref_t<_Up> > {};
-
-#if _LIBCPP_STD_VER > 17
-// remove_cvref - same as __uncvref
-template <class _Tp>
-struct remove_cvref {
- using type _LIBCPP_NODEBUG = __uncvref_t<_Tp>;
-};
-
-template <class _Tp> using remove_cvref_t = typename remove_cvref<_Tp>::type;
-#endif
-
-// is_nothrow_convertible
-
-#if _LIBCPP_STD_VER > 17
-
-template <typename _Tp>
-static void __test_noexcept(_Tp) noexcept;
-
-template<typename _Fm, typename _To>
-static bool_constant<noexcept(_VSTD::__test_noexcept<_To>(declval<_Fm>()))>
-__is_nothrow_convertible_test();
-
-template <typename _Fm, typename _To>
-struct __is_nothrow_convertible_helper: decltype(__is_nothrow_convertible_test<_Fm, _To>())
-{ };
-
-template <typename _Fm, typename _To>
-struct is_nothrow_convertible : _Or<
- _And<is_void<_To>, is_void<_Fm>>,
- _Lazy<_And, is_convertible<_Fm, _To>, __is_nothrow_convertible_helper<_Fm, _To>>
->::type { };
-
-template <typename _Fm, typename _To>
-inline constexpr bool is_nothrow_convertible_v = is_nothrow_convertible<_Fm, _To>::value;
-
-#endif // _LIBCPP_STD_VER > 17
-
-// aligned_storage
-
-template <class _Hp, class _Tp>
-struct __type_list
-{
- typedef _Hp _Head;
- typedef _Tp _Tail;
-};
-
-template <class _Tp>
-struct __align_type
-{
- static const size_t value = _LIBCPP_PREFERRED_ALIGNOF(_Tp);
- typedef _Tp type;
-};
-
-struct __struct_double {long double __lx;};
-struct __struct_double4 {double __lx[4];};
-
-typedef
- __type_list<__align_type<unsigned char>,
- __type_list<__align_type<unsigned short>,
- __type_list<__align_type<unsigned int>,
- __type_list<__align_type<unsigned long>,
- __type_list<__align_type<unsigned long long>,
- __type_list<__align_type<double>,
- __type_list<__align_type<long double>,
- __type_list<__align_type<__struct_double>,
- __type_list<__align_type<__struct_double4>,
- __type_list<__align_type<int*>,
- __nat
- > > > > > > > > > > __all_types;
-
-template <size_t _Align>
-struct _ALIGNAS(_Align) __fallback_overaligned {};
-
-template <class _TL, size_t _Align> struct __find_pod;
-
-template <class _Hp, size_t _Align>
-struct __find_pod<__type_list<_Hp, __nat>, _Align>
-{
- typedef typename conditional<
- _Align == _Hp::value,
- typename _Hp::type,
- __fallback_overaligned<_Align>
- >::type type;
-};
-
-template <class _Hp, class _Tp, size_t _Align>
-struct __find_pod<__type_list<_Hp, _Tp>, _Align>
-{
- typedef typename conditional<
- _Align == _Hp::value,
- typename _Hp::type,
- typename __find_pod<_Tp, _Align>::type
- >::type type;
-};
-
-template <class _TL, size_t _Len> struct __find_max_align;
-
-template <class _Hp, size_t _Len>
-struct __find_max_align<__type_list<_Hp, __nat>, _Len> : public integral_constant<size_t, _Hp::value> {};
-
-template <size_t _Len, size_t _A1, size_t _A2>
-struct __select_align
-{
-private:
- static const size_t __min = _A2 < _A1 ? _A2 : _A1;
- static const size_t __max = _A1 < _A2 ? _A2 : _A1;
-public:
- static const size_t value = _Len < __max ? __min : __max;
-};
-
-template <class _Hp, class _Tp, size_t _Len>
-struct __find_max_align<__type_list<_Hp, _Tp>, _Len>
- : public integral_constant<size_t, __select_align<_Len, _Hp::value, __find_max_align<_Tp, _Len>::value>::value> {};
-
-template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
-struct _LIBCPP_TEMPLATE_VIS aligned_storage
-{
- typedef typename __find_pod<__all_types, _Align>::type _Aligner;
- union type
- {
- _Aligner __align;
- unsigned char __data[(_Len + _Align - 1)/_Align * _Align];
- };
-};
-
-#if _LIBCPP_STD_VER > 11
-template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
- using aligned_storage_t = typename aligned_storage<_Len, _Align>::type;
-#endif
-
-#define _CREATE_ALIGNED_STORAGE_SPECIALIZATION(n) \
-template <size_t _Len>\
-struct _LIBCPP_TEMPLATE_VIS aligned_storage<_Len, n>\
-{\
- struct _ALIGNAS(n) type\
- {\
- unsigned char __lx[(_Len + n - 1)/n * n];\
- };\
-}
-
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x8);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x10);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x20);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x40);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x80);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x100);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x200);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x400);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x800);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1000);
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2000);
-// PE/COFF does not support alignment beyond 8192 (=0x2000)
-#if !defined(_LIBCPP_OBJECT_FORMAT_COFF)
-_CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4000);
-#endif // !defined(_LIBCPP_OBJECT_FORMAT_COFF)
-
-#undef _CREATE_ALIGNED_STORAGE_SPECIALIZATION
-
-
-// aligned_union
-
-template <size_t _I0, size_t ..._In>
-struct __static_max;
-
-template <size_t _I0>
-struct __static_max<_I0>
-{
- static const size_t value = _I0;
-};
-
-template <size_t _I0, size_t _I1, size_t ..._In>
-struct __static_max<_I0, _I1, _In...>
-{
- static const size_t value = _I0 >= _I1 ? __static_max<_I0, _In...>::value :
- __static_max<_I1, _In...>::value;
-};
-
-template <size_t _Len, class _Type0, class ..._Types>
-struct aligned_union
-{
- static const size_t alignment_value = __static_max<_LIBCPP_PREFERRED_ALIGNOF(_Type0),
- _LIBCPP_PREFERRED_ALIGNOF(_Types)...>::value;
- static const size_t __len = __static_max<_Len, sizeof(_Type0),
- sizeof(_Types)...>::value;
- typedef typename aligned_storage<__len, alignment_value>::type type;
-};
-
-#if _LIBCPP_STD_VER > 11
-template <size_t _Len, class ..._Types> using aligned_union_t = typename aligned_union<_Len, _Types...>::type;
-#endif
-
-template <class _Tp>
-struct __numeric_type
-{
- static void __test(...);
- static float __test(float);
- static double __test(char);
- static double __test(int);
- static double __test(unsigned);
- static double __test(long);
- static double __test(unsigned long);
- static double __test(long long);
- static double __test(unsigned long long);
- static double __test(double);
- static long double __test(long double);
-
- typedef decltype(__test(declval<_Tp>())) type;
- static const bool value = _IsNotSame<type, void>::value;
-};
-
-template <>
-struct __numeric_type<void>
-{
- static const bool value = true;
-};
-
-// __promote
-
-template <class _A1, class _A2 = void, class _A3 = void,
- bool = __numeric_type<_A1>::value &&
- __numeric_type<_A2>::value &&
- __numeric_type<_A3>::value>
-class __promote_imp
-{
-public:
- static const bool value = false;
-};
-
-template <class _A1, class _A2, class _A3>
-class __promote_imp<_A1, _A2, _A3, true>
-{
-private:
- typedef typename __promote_imp<_A1>::type __type1;
- typedef typename __promote_imp<_A2>::type __type2;
- typedef typename __promote_imp<_A3>::type __type3;
-public:
- typedef decltype(__type1() + __type2() + __type3()) type;
- static const bool value = true;
-};
-
-template <class _A1, class _A2>
-class __promote_imp<_A1, _A2, void, true>
-{
-private:
- typedef typename __promote_imp<_A1>::type __type1;
- typedef typename __promote_imp<_A2>::type __type2;
-public:
- typedef decltype(__type1() + __type2()) type;
- static const bool value = true;
-};
-
-template <class _A1>
-class __promote_imp<_A1, void, void, true>
-{
-public:
- typedef typename __numeric_type<_A1>::type type;
- static const bool value = true;
-};
-
-template <class _A1, class _A2 = void, class _A3 = void>
-class __promote : public __promote_imp<_A1, _A2, _A3> {};
-
-// make_signed / make_unsigned
-
-typedef
- __type_list<signed char,
- __type_list<signed short,
- __type_list<signed int,
- __type_list<signed long,
- __type_list<signed long long,
-#ifndef _LIBCPP_HAS_NO_INT128
- __type_list<__int128_t,
-#endif
- __nat
-#ifndef _LIBCPP_HAS_NO_INT128
- >
-#endif
- > > > > > __signed_types;
-
-typedef
- __type_list<unsigned char,
- __type_list<unsigned short,
- __type_list<unsigned int,
- __type_list<unsigned long,
- __type_list<unsigned long long,
-#ifndef _LIBCPP_HAS_NO_INT128
- __type_list<__uint128_t,
-#endif
- __nat
-#ifndef _LIBCPP_HAS_NO_INT128
- >
-#endif
- > > > > > __unsigned_types;
-
-template <class _TypeList, size_t _Size, bool = _Size <= sizeof(typename _TypeList::_Head)> struct __find_first;
-
-template <class _Hp, class _Tp, size_t _Size>
-struct __find_first<__type_list<_Hp, _Tp>, _Size, true>
-{
- typedef _LIBCPP_NODEBUG _Hp type;
-};
-
-template <class _Hp, class _Tp, size_t _Size>
-struct __find_first<__type_list<_Hp, _Tp>, _Size, false>
-{
- typedef _LIBCPP_NODEBUG typename __find_first<_Tp, _Size>::type type;
-};
-
-template <class _Tp, bool = is_integral<_Tp>::value || is_enum<_Tp>::value>
-struct __make_signed {};
-
-template <class _Tp>
-struct __make_signed<_Tp, true>
-{
- typedef typename __find_first<__signed_types, sizeof(_Tp)>::type type;
-};
-
-template <> struct __make_signed<bool, true> {};
-template <> struct __make_signed< signed short, true> {typedef short type;};
-template <> struct __make_signed<unsigned short, true> {typedef short type;};
-template <> struct __make_signed< signed int, true> {typedef int type;};
-template <> struct __make_signed<unsigned int, true> {typedef int type;};
-template <> struct __make_signed< signed long, true> {typedef long type;};
-template <> struct __make_signed<unsigned long, true> {typedef long type;};
-template <> struct __make_signed< signed long long, true> {typedef long long type;};
-template <> struct __make_signed<unsigned long long, true> {typedef long long type;};
-#ifndef _LIBCPP_HAS_NO_INT128
-template <> struct __make_signed<__int128_t, true> {typedef __int128_t type;};
-template <> struct __make_signed<__uint128_t, true> {typedef __int128_t type;};
-#endif
-
-template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS make_signed
-{
- typedef typename __apply_cv<_Tp, typename __make_signed<typename remove_cv<_Tp>::type>::type>::type type;
-};
-
-#if _LIBCPP_STD_VER > 11
-template <class _Tp> using make_signed_t = typename make_signed<_Tp>::type;
-#endif
-
-template <class _Tp, bool = is_integral<_Tp>::value || is_enum<_Tp>::value>
-struct __make_unsigned {};
-
-template <class _Tp>
-struct __make_unsigned<_Tp, true>
-{
- typedef typename __find_first<__unsigned_types, sizeof(_Tp)>::type type;
-};
-
-template <> struct __make_unsigned<bool, true> {};
-template <> struct __make_unsigned< signed short, true> {typedef unsigned short type;};
-template <> struct __make_unsigned<unsigned short, true> {typedef unsigned short type;};
-template <> struct __make_unsigned< signed int, true> {typedef unsigned int type;};
-template <> struct __make_unsigned<unsigned int, true> {typedef unsigned int type;};
-template <> struct __make_unsigned< signed long, true> {typedef unsigned long type;};
-template <> struct __make_unsigned<unsigned long, true> {typedef unsigned long type;};
-template <> struct __make_unsigned< signed long long, true> {typedef unsigned long long type;};
-template <> struct __make_unsigned<unsigned long long, true> {typedef unsigned long long type;};
-#ifndef _LIBCPP_HAS_NO_INT128
-template <> struct __make_unsigned<__int128_t, true> {typedef __uint128_t type;};
-template <> struct __make_unsigned<__uint128_t, true> {typedef __uint128_t type;};
-#endif
-
-template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS make_unsigned
-{
- typedef typename __apply_cv<_Tp, typename __make_unsigned<typename remove_cv<_Tp>::type>::type>::type type;
-};
-
-#if _LIBCPP_STD_VER > 11
-template <class _Tp> using make_unsigned_t = typename make_unsigned<_Tp>::type;
-#endif
-
-#ifndef _LIBCPP_CXX03_LANG
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI constexpr
-typename make_unsigned<_Tp>::type __to_unsigned_like(_Tp __x) noexcept {
- return static_cast<typename make_unsigned<_Tp>::type>(__x);
-}
-#endif
-
-template <class _Tp, class _Up>
-using __copy_unsigned_t = __conditional_t<is_unsigned<_Tp>::value, typename make_unsigned<_Up>::type, _Up>;
-
-/// Helper to promote an integral to smallest 32, 64, or 128 bit representation.
-///
-/// The restriction is the same as the integral version of to_char.
-template <class _Tp>
-#if _LIBCPP_STD_VER > 17
- requires (is_signed_v<_Tp> || is_unsigned_v<_Tp> || is_same_v<_Tp, char>)
-#endif
-using __make_32_64_or_128_bit_t =
- __copy_unsigned_t<_Tp,
- __conditional_t<sizeof(_Tp) <= sizeof(int32_t), int32_t,
- __conditional_t<sizeof(_Tp) <= sizeof(int64_t), int64_t,
-#ifndef _LIBCPP_HAS_NO_INT128
- __conditional_t<sizeof(_Tp) <= sizeof(__int128_t), __int128_t,
- /* else */ void>
-#else
- /* else */ void
-#endif
- > >
- >;
-
-#if _LIBCPP_STD_VER > 17
-// Let COND_RES(X, Y) be:
-template <class _Tp, class _Up>
-using __cond_type = decltype(false ? declval<_Tp>() : declval<_Up>());
-
-template <class _Tp, class _Up, class = void>
-struct __common_type3 {};
-
-// sub-bullet 4 - "if COND_RES(CREF(D1), CREF(D2)) denotes a type..."
-template <class _Tp, class _Up>
-struct __common_type3<_Tp, _Up, void_t<__cond_type<const _Tp&, const _Up&>>>
-{
- using type = remove_cvref_t<__cond_type<const _Tp&, const _Up&>>;
-};
-
-template <class _Tp, class _Up, class = void>
-struct __common_type2_imp : __common_type3<_Tp, _Up> {};
-#else
-template <class _Tp, class _Up, class = void>
-struct __common_type2_imp {};
-#endif
-
-// sub-bullet 3 - "if decay_t<decltype(false ? declval<D1>() : declval<D2>())> ..."
-template <class _Tp, class _Up>
-struct __common_type2_imp<_Tp, _Up,
- typename __void_t<decltype(
- true ? declval<_Tp>() : declval<_Up>()
- )>::type>
-{
- typedef _LIBCPP_NODEBUG typename decay<decltype(
- true ? declval<_Tp>() : declval<_Up>()
- )>::type type;
-};
-
-template <class, class = void>
-struct __common_type_impl {};
-
-// Clang provides variadic templates in C++03 as an extension.
-#if !defined(_LIBCPP_CXX03_LANG) || defined(__clang__)
-# define _LIBCPP_OPTIONAL_PACK(...) , __VA_ARGS__
-template <class... _Tp>
-struct __common_types;
-template <class... _Tp>
-struct _LIBCPP_TEMPLATE_VIS common_type;
-#else
-# define _LIBCPP_OPTIONAL_PACK(...)
-struct __no_arg;
-template <class _Tp, class _Up, class = __no_arg>
-struct __common_types;
-template <class _Tp = __no_arg, class _Up = __no_arg, class _Vp = __no_arg,
- class _Unused = __no_arg>
-struct common_type {
- static_assert(sizeof(_Unused) == 0,
- "common_type accepts at most 3 arguments in C++03");
-};
-#endif // _LIBCPP_CXX03_LANG
-
-template <class _Tp, class _Up>
-struct __common_type_impl<
- __common_types<_Tp, _Up>,
- typename __void_t<typename common_type<_Tp, _Up>::type>::type>
-{
- typedef typename common_type<_Tp, _Up>::type type;
-};
-
-template <class _Tp, class _Up, class _Vp _LIBCPP_OPTIONAL_PACK(class... _Rest)>
-struct __common_type_impl<
- __common_types<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)>,
- typename __void_t<typename common_type<_Tp, _Up>::type>::type>
- : __common_type_impl<__common_types<typename common_type<_Tp, _Up>::type,
- _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)> > {
-};
-
-// bullet 1 - sizeof...(Tp) == 0
-
-template <>
-struct _LIBCPP_TEMPLATE_VIS common_type<> {};
-
-// bullet 2 - sizeof...(Tp) == 1
-
-template <class _Tp>
-struct _LIBCPP_TEMPLATE_VIS common_type<_Tp>
- : public common_type<_Tp, _Tp> {};
-
-// bullet 3 - sizeof...(Tp) == 2
-
-// sub-bullet 1 - "If is_same_v<T1, D1> is false or ..."
-template <class _Tp, class _Up>
-struct _LIBCPP_TEMPLATE_VIS common_type<_Tp, _Up>
- : conditional<
- _IsSame<_Tp, typename decay<_Tp>::type>::value && _IsSame<_Up, typename decay<_Up>::type>::value,
- __common_type2_imp<_Tp, _Up>,
- common_type<typename decay<_Tp>::type, typename decay<_Up>::type>
- >::type
-{};
-
-// bullet 4 - sizeof...(Tp) > 2
-
-template <class _Tp, class _Up, class _Vp _LIBCPP_OPTIONAL_PACK(class... _Rest)>
-struct _LIBCPP_TEMPLATE_VIS
- common_type<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)>
- : __common_type_impl<
- __common_types<_Tp, _Up, _Vp _LIBCPP_OPTIONAL_PACK(_Rest...)> > {};
-
-#undef _LIBCPP_OPTIONAL_PACK
-
-#if _LIBCPP_STD_VER > 11
-template <class ..._Tp> using common_type_t = typename common_type<_Tp...>::type;
-#endif
-
-// Let COPYCV(FROM, TO) be an alias for type TO with the addition of FROM's
-// top-level cv-qualifiers.
-template <class _From, class _To>
-struct __copy_cv
-{
- using type = _To;
-};
-
-template <class _From, class _To>
-struct __copy_cv<const _From, _To>
-{
- using type = typename add_const<_To>::type;
-};
-
-template <class _From, class _To>
-struct __copy_cv<volatile _From, _To>
-{
- using type = typename add_volatile<_To>::type;
-};
-
-template <class _From, class _To>
-struct __copy_cv<const volatile _From, _To>
-{
- using type = typename add_cv<_To>::type;
-};
-
-template <class _From, class _To>
-using __copy_cv_t = typename __copy_cv<_From, _To>::type;
-
-template <class _From, class _To>
-struct __copy_cvref
-{
- using type = __copy_cv_t<_From, _To>;
-};
-
-template <class _From, class _To>
-struct __copy_cvref<_From&, _To>
-{
- using type = typename add_lvalue_reference<__copy_cv_t<_From, _To> >::type;
-};
-
-template <class _From, class _To>
-struct __copy_cvref<_From&&, _To>
-{
- using type = typename add_rvalue_reference<__copy_cv_t<_From, _To> >::type;
-};
-
-template <class _From, class _To>
-using __copy_cvref_t = typename __copy_cvref<_From, _To>::type;
-
-
-// common_reference
-#if _LIBCPP_STD_VER > 17
-// Let COND_RES(X, Y) be:
-template <class _Xp, class _Yp>
-using __cond_res =
- decltype(false ? declval<_Xp(&)()>()() : declval<_Yp(&)()>()());
-
-// Let `XREF(A)` denote a unary alias template `T` such that `T<U>` denotes the same type as `U`
-// with the addition of `A`'s cv and reference qualifiers, for a non-reference cv-unqualified type
-// `U`.
-// [Note: `XREF(A)` is `__xref<A>::template __apply`]
-template <class _Tp>
-struct __xref {
- template<class _Up>
- using __apply = __copy_cvref_t<_Tp, _Up>;
-};
-
-// Given types A and B, let X be remove_reference_t<A>, let Y be remove_reference_t<B>,
-// and let COMMON-REF(A, B) be:
-template<class _Ap, class _Bp, class _Xp = remove_reference_t<_Ap>, class _Yp = remove_reference_t<_Bp>>
-struct __common_ref;
-
-template<class _Xp, class _Yp>
-using __common_ref_t = typename __common_ref<_Xp, _Yp>::__type;
-
-template<class _Xp, class _Yp>
-using __cv_cond_res = __cond_res<__copy_cv_t<_Xp, _Yp>&, __copy_cv_t<_Yp, _Xp>&>;
-
-
-// If A and B are both lvalue reference types, COMMON-REF(A, B) is
-// COND-RES(COPYCV(X, Y)&, COPYCV(Y, X)&) if that type exists and is a reference type.
-template<class _Ap, class _Bp, class _Xp, class _Yp>
-requires requires { typename __cv_cond_res<_Xp, _Yp>; } && is_reference_v<__cv_cond_res<_Xp, _Yp>>
-struct __common_ref<_Ap&, _Bp&, _Xp, _Yp>
-{
- using __type = __cv_cond_res<_Xp, _Yp>;
-};
-
-// Otherwise, let C be remove_reference_t<COMMON-REF(X&, Y&)>&&. ...
-template <class _Xp, class _Yp>
-using __common_ref_C = remove_reference_t<__common_ref_t<_Xp&, _Yp&>>&&;
-
-
-// .... If A and B are both rvalue reference types, C is well-formed, and
-// is_convertible_v<A, C> && is_convertible_v<B, C> is true, then COMMON-REF(A, B) is C.
-template<class _Ap, class _Bp, class _Xp, class _Yp>
-requires
- requires { typename __common_ref_C<_Xp, _Yp>; } &&
- is_convertible_v<_Ap&&, __common_ref_C<_Xp, _Yp>> &&
- is_convertible_v<_Bp&&, __common_ref_C<_Xp, _Yp>>
-struct __common_ref<_Ap&&, _Bp&&, _Xp, _Yp>
-{
- using __type = __common_ref_C<_Xp, _Yp>;
-};
-
-// Otherwise, let D be COMMON-REF(const X&, Y&). ...
-template <class _Tp, class _Up>
-using __common_ref_D = __common_ref_t<const _Tp&, _Up&>;
-
-// ... If A is an rvalue reference and B is an lvalue reference and D is well-formed and
-// is_convertible_v<A, D> is true, then COMMON-REF(A, B) is D.
-template<class _Ap, class _Bp, class _Xp, class _Yp>
-requires requires { typename __common_ref_D<_Xp, _Yp>; } &&
- is_convertible_v<_Ap&&, __common_ref_D<_Xp, _Yp>>
-struct __common_ref<_Ap&&, _Bp&, _Xp, _Yp>
-{
- using __type = __common_ref_D<_Xp, _Yp>;
-};
-
-// Otherwise, if A is an lvalue reference and B is an rvalue reference, then
-// COMMON-REF(A, B) is COMMON-REF(B, A).
-template<class _Ap, class _Bp, class _Xp, class _Yp>
-struct __common_ref<_Ap&, _Bp&&, _Xp, _Yp> : __common_ref<_Bp&&, _Ap&> {};
-
-// Otherwise, COMMON-REF(A, B) is ill-formed.
-template<class _Ap, class _Bp, class _Xp, class _Yp>
-struct __common_ref {};
-
-// Note C: For the common_reference trait applied to a parameter pack [...]
-
-template <class...>
-struct common_reference;
-
-template <class... _Types>
-using common_reference_t = typename common_reference<_Types...>::type;
-
-// bullet 1 - sizeof...(T) == 0
-template<>
-struct common_reference<> {};
-
-// bullet 2 - sizeof...(T) == 1
-template <class _Tp>
-struct common_reference<_Tp>
-{
- using type = _Tp;
-};
-
-// bullet 3 - sizeof...(T) == 2
-template <class _Tp, class _Up> struct __common_reference_sub_bullet3;
-template <class _Tp, class _Up> struct __common_reference_sub_bullet2 : __common_reference_sub_bullet3<_Tp, _Up> {};
-template <class _Tp, class _Up> struct __common_reference_sub_bullet1 : __common_reference_sub_bullet2<_Tp, _Up> {};
-
-// sub-bullet 1 - If T1 and T2 are reference types and COMMON-REF(T1, T2) is well-formed, then
-// the member typedef `type` denotes that type.
-template <class _Tp, class _Up> struct common_reference<_Tp, _Up> : __common_reference_sub_bullet1<_Tp, _Up> {};
-
-template <class _Tp, class _Up>
-requires is_reference_v<_Tp> && is_reference_v<_Up> && requires { typename __common_ref_t<_Tp, _Up>; }
-struct __common_reference_sub_bullet1<_Tp, _Up>
-{
- using type = __common_ref_t<_Tp, _Up>;
-};
-
-// sub-bullet 2 - Otherwise, if basic_common_reference<remove_cvref_t<T1>, remove_cvref_t<T2>, XREF(T1), XREF(T2)>::type
-// is well-formed, then the member typedef `type` denotes that type.
-template <class, class, template <class> class, template <class> class> struct basic_common_reference {};
-
-template <class _Tp, class _Up>
-using __basic_common_reference_t = typename basic_common_reference<
- remove_cvref_t<_Tp>, remove_cvref_t<_Up>,
- __xref<_Tp>::template __apply, __xref<_Up>::template __apply>::type;
-
-template <class _Tp, class _Up>
-requires requires { typename __basic_common_reference_t<_Tp, _Up>; }
-struct __common_reference_sub_bullet2<_Tp, _Up>
-{
- using type = __basic_common_reference_t<_Tp, _Up>;
-};
-
-// sub-bullet 3 - Otherwise, if COND-RES(T1, T2) is well-formed,
-// then the member typedef `type` denotes that type.
-template <class _Tp, class _Up>
-requires requires { typename __cond_res<_Tp, _Up>; }
-struct __common_reference_sub_bullet3<_Tp, _Up>
-{
- using type = __cond_res<_Tp, _Up>;
-};
-
-
-// sub-bullet 4 & 5 - Otherwise, if common_type_t<T1, T2> is well-formed,
-// then the member typedef `type` denotes that type.
-// - Otherwise, there shall be no member `type`.
-template <class _Tp, class _Up> struct __common_reference_sub_bullet3 : common_type<_Tp, _Up> {};
-
-// bullet 4 - If there is such a type `C`, the member typedef type shall denote the same type, if
-// any, as `common_reference_t<C, Rest...>`.
-template <class _Tp, class _Up, class _Vp, class... _Rest>
-requires requires { typename common_reference_t<_Tp, _Up>; }
-struct common_reference<_Tp, _Up, _Vp, _Rest...>
- : common_reference<common_reference_t<_Tp, _Up>, _Vp, _Rest...>
-{};
-
-// bullet 5 - Otherwise, there shall be no member `type`.
-template <class...> struct common_reference {};
-
-#endif // _LIBCPP_STD_VER > 17
-
#ifndef _LIBCPP_CXX03_LANG
// First of all, we can't implement this check in C++03 mode because the {}
// default initialization syntax isn't valid.
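
The member-detector machinery removed from this header (__sfinae_test_impl and _IsValidExpansion, now provided by __type_traits/is_valid_expansion.h) is the classic detection idiom. A rough stand-alone sketch of the same pattern, with non-reserved names instead of the libc++ ones:

#include <type_traits>

// If Templ<Args...> instantiates, the int overload is viable and wins.
template <template <class...> class Templ, class... Args, class = Templ<Args...>>
std::true_type sfinae_test(int);
template <template <class...> class, class...>
std::false_type sfinae_test(...);

template <template <class...> class Templ, class... Args>
using is_valid_expansion = decltype(sfinae_test<Templ, Args...>(0));

// Example detector: does T have a nested ::value_type?
template <class T>
using value_type_of = typename T::value_type;

struct HasValueType { using value_type = int; };
static_assert(is_valid_expansion<value_type_of, HasValueType>::value, "has ::value_type");
static_assert(!is_valid_expansion<value_type_of, int>::value, "int does not");
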
diff --git a/contrib/llvm-project/libcxx/include/vector b/contrib/llvm-project/libcxx/include/vector
index 14f586c9bfd7..30030f85e43c 100644
--- a/contrib/llvm-project/libcxx/include/vector
+++ b/contrib/llvm-project/libcxx/include/vector
@@ -291,6 +291,8 @@ erase_if(vector<T, Allocator>& c, Predicate pred); // C++20
#include <__iterator/reverse_iterator.h>
#include <__iterator/wrap_iter.h>
#include <__memory/allocate_at_least.h>
+#include <__memory/pointer_traits.h>
+#include <__memory/swap_allocator.h>
#include <__split_buffer>
#include <__utility/forward.h>
#include <__utility/move.h>
@@ -895,9 +897,11 @@ template <class _Tp, class _Allocator>
void
vector<_Tp, _Allocator>::__swap_out_circular_buffer(__split_buffer<value_type, allocator_type&>& __v)
{
-
__annotate_delete();
- _VSTD::__construct_backward_with_exception_guarantees(this->__alloc(), this->__begin_, this->__end_, __v.__begin_);
+ using _RevIter = std::reverse_iterator<pointer>;
+ __v.__begin_ = std::__uninitialized_allocator_move_if_noexcept(
+ __alloc(), _RevIter(__end_), _RevIter(__begin_), _RevIter(__v.__begin_))
+ .base();
_VSTD::swap(this->__begin_, __v.__begin_);
_VSTD::swap(this->__end_, __v.__end_);
_VSTD::swap(this->__end_cap(), __v.__end_cap());
@@ -912,8 +916,11 @@ vector<_Tp, _Allocator>::__swap_out_circular_buffer(__split_buffer<value_type, a
{
__annotate_delete();
pointer __r = __v.__begin_;
- _VSTD::__construct_backward_with_exception_guarantees(this->__alloc(), this->__begin_, __p, __v.__begin_);
- _VSTD::__construct_forward_with_exception_guarantees(this->__alloc(), __p, this->__end_, __v.__end_);
+ using _RevIter = std::reverse_iterator<pointer>;
+ __v.__begin_ = std::__uninitialized_allocator_move_if_noexcept(
+ __alloc(), _RevIter(__p), _RevIter(__begin_), _RevIter(__v.__begin_))
+ .base();
+ __v.__end_ = std::__uninitialized_allocator_move_if_noexcept(__alloc(), __p, __end_, __v.__end_);
_VSTD::swap(this->__begin_, __v.__begin_);
_VSTD::swap(this->__end_, __v.__end_);
_VSTD::swap(this->__end_cap(), __v.__end_cap());
@@ -1001,8 +1008,8 @@ typename enable_if
>::type
vector<_Tp, _Allocator>::__construct_at_end(_ForwardIterator __first, _ForwardIterator __last, size_type __n)
{
- _ConstructTransaction __tx(*this, __n);
- _VSTD::__construct_range_forward(this->__alloc(), __first, __last, __tx.__pos_);
+ _ConstructTransaction __tx(*this, __n);
+ __tx.__pos_ = std::__uninitialized_allocator_copy(__alloc(), __first, __last, __tx.__pos_);
}
// Default constructs __n objects starting at __end_
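
The new relocation path above fills the tail of the destination buffer back-to-front through reverse iterators. A simplified sketch of that shape using only standard C++17 facilities; the libc++ helper additionally routes construction through allocator_traits and falls back to copying when moves may throw:

#include <iterator>
#include <memory>

// Construct the range ending at raw_end from [first, last), filling from the
// back so the lowest constructed address comes out as the new "begin".
template <class T>
T* move_into_tail(T* first, T* last, T* raw_end) {
    auto done = std::uninitialized_move(std::make_reverse_iterator(last),
                                        std::make_reverse_iterator(first),
                                        std::make_reverse_iterator(raw_end));
    return done.base(); // same role as the .base() call in the hunk above
}
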
diff --git a/contrib/llvm-project/libcxx/src/assert.cpp b/contrib/llvm-project/libcxx/src/assert.cpp
index 54459800728b..c218645f1771 100644
--- a/contrib/llvm-project/libcxx/src/assert.cpp
+++ b/contrib/llvm-project/libcxx/src/assert.cpp
@@ -8,14 +8,57 @@
#include <__assert>
#include <__config>
+#include <cstdarg>
#include <cstdio>
#include <cstdlib>
+#ifdef __BIONIC__
+# include <android/api-level.h>
+# include <syslog.h>
+extern "C" void android_set_abort_message(const char* msg);
+#endif
+
+#if defined(__APPLE__) && __has_include(<CrashReporterClient.h>)
+# include <CrashReporterClient.h>
+#endif
+
_LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_WEAK
-void __libcpp_assertion_handler(char const* __file, int __line, char const* __expression, char const* __message) {
- std::fprintf(stderr, "%s:%d: libc++ assertion '%s' failed. %s\n", __file, __line, __expression, __message);
+void __libcpp_assertion_handler(char const* format, ...) {
+ // Write message to stderr. We do this before formatting into a
+ // buffer so that we still get some information out if that fails.
+ {
+ va_list list;
+ va_start(list, format);
+ std::vfprintf(stderr, format, list);
+ va_end(list);
+ }
+
+ // Format the arguments into an allocated buffer for CrashReport & friends.
+ // We leak the buffer on purpose, since we're about to abort() anyway.
+ char* buffer; (void)buffer;
+ va_list list;
+ va_start(list, format);
+
+#if defined(__APPLE__) && __has_include(<CrashReporterClient.h>)
+ // Note that we should technically synchronize accesses here (by e.g. taking a lock),
+ // however concretely we're only setting a pointer, so the likelihood of a race here
+ // is low.
+ vasprintf(&buffer, format, list);
+ CRSetCrashLogMessage(buffer);
+#elif defined(__BIONIC__)
+ // Show error in tombstone.
+ vasprintf(&buffer, format, list);
+ android_set_abort_message(buffer);
+
+ // Show error in logcat.
+ openlog("libc++", 0, 0);
+ syslog(LOG_CRIT, "%s", buffer);
+ closelog();
+#endif
+ va_end(list);
+
std::abort();
}
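
The handler now follows the usual printf-style pattern: each consumer of the arguments gets its own va_list, because a list already walked by vfprintf cannot be reused. A minimal stand-alone sketch, with vsnprintf standing in for the platform-specific vasprintf used above:

#include <cstdarg>
#include <cstdio>

void report(const char* format, ...) {
    // First pass: print straight to stderr.
    va_list args;
    va_start(args, format);
    std::vfprintf(stderr, format, args);
    va_end(args);

    // Second pass: format into a buffer for some crash-reporting sink.
    char buffer[1024];
    va_list again;            // fresh list; the first one is spent
    va_start(again, format);
    std::vsnprintf(buffer, sizeof(buffer), format, again);
    va_end(again);
}
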
diff --git a/contrib/llvm-project/lld/ELF/Driver.cpp b/contrib/llvm-project/lld/ELF/Driver.cpp
index dd17adc4dbea..6c0fd3139e87 100644
--- a/contrib/llvm-project/lld/ELF/Driver.cpp
+++ b/contrib/llvm-project/lld/ELF/Driver.cpp
@@ -290,7 +290,7 @@ void LinkerDriver::addFile(StringRef path, bool withLOption) {
// Add a given library by searching it from input search paths.
void LinkerDriver::addLibrary(StringRef name) {
if (Optional<std::string> path = searchLibrary(name))
- addFile(*path, /*withLOption=*/true);
+ addFile(saver().save(*path), /*withLOption=*/true);
else
error("unable to find library -l" + name, ErrorTag::LibNotFound, {name});
}
@@ -809,13 +809,10 @@ static OrphanHandlingPolicy getOrphanHandling(opt::InputArgList &args) {
// --build-id=sha1 are actually tree hashes for performance reasons.
static std::pair<BuildIdKind, std::vector<uint8_t>>
getBuildId(opt::InputArgList &args) {
- auto *arg = args.getLastArg(OPT_build_id, OPT_build_id_eq);
+ auto *arg = args.getLastArg(OPT_build_id);
if (!arg)
return {BuildIdKind::None, {}};
- if (arg->getOption().getID() == OPT_build_id)
- return {BuildIdKind::Fast, {}};
-
StringRef s = arg->getValue();
if (s == "fast")
return {BuildIdKind::Fast, {}};
@@ -1691,8 +1688,10 @@ void LinkerDriver::inferMachineType() {
static uint64_t getMaxPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "max-page-size",
target->defaultMaxPageSize);
- if (!isPowerOf2_64(val))
+ if (!isPowerOf2_64(val)) {
error("max-page-size: value isn't a power of 2");
+ return target->defaultMaxPageSize;
+ }
if (config->nmagic || config->omagic) {
if (val != target->defaultMaxPageSize)
warn("-z max-page-size set, but paging disabled by omagic or nmagic");
@@ -1706,8 +1705,10 @@ static uint64_t getMaxPageSize(opt::InputArgList &args) {
static uint64_t getCommonPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "common-page-size",
target->defaultCommonPageSize);
- if (!isPowerOf2_64(val))
+ if (!isPowerOf2_64(val)) {
error("common-page-size: value isn't a power of 2");
+ return target->defaultCommonPageSize;
+ }
if (config->nmagic || config->omagic) {
if (val != target->defaultCommonPageSize)
warn("-z common-page-size set, but paging disabled by omagic or nmagic");
diff --git a/contrib/llvm-project/lld/ELF/DriverUtils.cpp b/contrib/llvm-project/lld/ELF/DriverUtils.cpp
index 54e4f9374e61..51f3dc3a056e 100644
--- a/contrib/llvm-project/lld/ELF/DriverUtils.cpp
+++ b/contrib/llvm-project/lld/ELF/DriverUtils.cpp
@@ -52,23 +52,16 @@ ELFOptTable::ELFOptTable() : OptTable(optInfo) {}
// Set color diagnostics according to --color-diagnostics={auto,always,never}
// or --no-color-diagnostics flags.
static void handleColorDiagnostics(opt::InputArgList &args) {
- auto *arg = args.getLastArg(OPT_color_diagnostics, OPT_color_diagnostics_eq,
- OPT_no_color_diagnostics);
+ auto *arg = args.getLastArg(OPT_color_diagnostics);
if (!arg)
return;
- if (arg->getOption().getID() == OPT_color_diagnostics) {
+ StringRef s = arg->getValue();
+ if (s == "always")
lld::errs().enable_colors(true);
- } else if (arg->getOption().getID() == OPT_no_color_diagnostics) {
+ else if (s == "never")
lld::errs().enable_colors(false);
- } else {
- StringRef s = arg->getValue();
- if (s == "always")
- lld::errs().enable_colors(true);
- else if (s == "never")
- lld::errs().enable_colors(false);
- else if (s != "auto")
- error("unknown option: --color-diagnostics=" + s);
- }
+ else if (s != "auto")
+ error("unknown option: --color-diagnostics=" + s);
}
static cl::TokenizerCallback getQuotingStyle(opt::InputArgList &args) {
@@ -187,6 +180,7 @@ std::string elf::createResponseFile(const opt::InputArgList &args) {
break;
case OPT_call_graph_ordering_file:
case OPT_dynamic_list:
+ case OPT_export_dynamic_symbol_list:
case OPT_just_symbols:
case OPT_library_path:
case OPT_retain_symbols_file:
diff --git a/contrib/llvm-project/lld/ELF/InputFiles.cpp b/contrib/llvm-project/lld/ELF/InputFiles.cpp
index c0076a3722fe..927dc272b532 100644
--- a/contrib/llvm-project/lld/ELF/InputFiles.cpp
+++ b/contrib/llvm-project/lld/ELF/InputFiles.cpp
@@ -460,9 +460,9 @@ static void addDependentLibrary(StringRef specifier, const InputFile *f) {
if (!config->dependentLibraries)
return;
if (Optional<std::string> s = searchLibraryBaseName(specifier))
- driver->addFile(*s, /*withLOption=*/true);
+ driver->addFile(saver().save(*s), /*withLOption=*/true);
else if (Optional<std::string> s = findFromSearchPaths(specifier))
- driver->addFile(*s, /*withLOption=*/true);
+ driver->addFile(saver().save(*s), /*withLOption=*/true);
else if (fs::exists(specifier))
driver->addFile(specifier, /*withLOption=*/false);
else
diff --git a/contrib/llvm-project/lld/ELF/LinkerScript.cpp b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
index 3be42904b7fe..9f30117c0279 100644
--- a/contrib/llvm-project/lld/ELF/LinkerScript.cpp
+++ b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
@@ -112,9 +112,9 @@ static StringRef getOutputSectionName(const InputSectionBase *s) {
uint64_t ExprValue::getValue() const {
if (sec)
- return alignTo(sec->getOutputSection()->addr + sec->getOffset(val),
- alignment);
- return alignTo(val, alignment);
+ return alignToPowerOf2(sec->getOutputSection()->addr + sec->getOffset(val),
+ alignment);
+ return alignToPowerOf2(val, alignment);
}
uint64_t ExprValue::getSecAddr() const {
@@ -989,7 +989,7 @@ void LinkerScript::assignOffsets(OutputSection *sec) {
// sec->alignment is the max of ALIGN and the maximum of input
// section alignments.
const uint64_t pos = dot;
- dot = alignTo(dot, sec->alignment);
+ dot = alignToPowerOf2(dot, sec->alignment);
sec->addr = dot;
expandMemoryRegions(dot - pos);
}
@@ -1003,7 +1003,7 @@ void LinkerScript::assignOffsets(OutputSection *sec) {
if (sec->lmaExpr) {
ctx->lmaOffset = sec->lmaExpr().getValue() - dot;
} else if (MemoryRegion *mr = sec->lmaRegion) {
- uint64_t lmaStart = alignTo(mr->curPos, sec->alignment);
+ uint64_t lmaStart = alignToPowerOf2(mr->curPos, sec->alignment);
if (mr->curPos < lmaStart)
expandMemoryRegion(mr, lmaStart - mr->curPos, sec->name);
ctx->lmaOffset = lmaStart - dot;
@@ -1046,7 +1046,7 @@ void LinkerScript::assignOffsets(OutputSection *sec) {
for (InputSection *isec : cast<InputSectionDescription>(cmd)->sections) {
assert(isec->getParent() == sec);
const uint64_t pos = dot;
- dot = alignTo(dot, isec->alignment);
+ dot = alignToPowerOf2(dot, isec->alignment);
isec->outSecOff = dot - sec->addr;
dot += isec->getSize();
diff --git a/contrib/llvm-project/lld/ELF/Options.td b/contrib/llvm-project/lld/ELF/Options.td
index c98d21717de0..80c0ff9fe1b8 100644
--- a/contrib/llvm-project/lld/ELF/Options.td
+++ b/contrib/llvm-project/lld/ELF/Options.td
@@ -50,10 +50,9 @@ def Bdynamic: F<"Bdynamic">, HelpText<"Link against shared libraries (default)">
def Bstatic: F<"Bstatic">, HelpText<"Do not link against shared libraries">;
-def build_id: F<"build-id">, HelpText<"Alias for --build-id=fast">;
-
-def build_id_eq: J<"build-id=">, HelpText<"Generate build ID note">,
+def build_id: J<"build-id=">, HelpText<"Generate build ID note">,
MetaVarName<"[fast,md5,sha1,uuid,0x<hexstring>]">;
+def : F<"build-id">, Alias<build_id>, AliasArgs<["fast"]>, HelpText<"Alias for --build-id=fast">;
defm check_sections: B<"check-sections",
"Check section addresses for overlaps (default)",
@@ -119,12 +118,13 @@ defm call_graph_profile_sort: BB<"call-graph-profile-sort",
// --chroot doesn't have a help text because it is an internal option.
def chroot: Separate<["--"], "chroot">;
-defm color_diagnostics: BB<"color-diagnostics",
- "Alias for --color-diagnostics=always",
- "Alias for --color-diagnostics=never">;
-def color_diagnostics_eq: JJ<"color-diagnostics=">,
+def color_diagnostics: JJ<"color-diagnostics=">,
HelpText<"Use colors in diagnostics (default: auto)">,
MetaVarName<"[auto,always,never]">;
+def : Flag<["--"], "color-diagnostics">, Alias<color_diagnostics>, AliasArgs<["always"]>,
+ HelpText<"Alias for --color-diagnostics=always">;
+def : Flag<["--"], "no-color-diagnostics">, Alias<color_diagnostics>, AliasArgs<["never"]>,
+ HelpText<"Alias for --color-diagnostics=never">;
def cref: FF<"cref">,
HelpText<"Output cross reference table. If -Map is specified, print to the map file">;
diff --git a/contrib/llvm-project/lld/ELF/ScriptParser.cpp b/contrib/llvm-project/lld/ELF/ScriptParser.cpp
index 4d73541b3d42..7fc50b293b15 100644
--- a/contrib/llvm-project/lld/ELF/ScriptParser.cpp
+++ b/contrib/llvm-project/lld/ELF/ScriptParser.cpp
@@ -1392,7 +1392,7 @@ Expr ScriptParser::readPrimary() {
Expr e = readExpr();
if (consume(")")) {
e = checkAlignment(e, location);
- return [=] { return alignTo(script->getDot(), e().getValue()); };
+ return [=] { return alignToPowerOf2(script->getDot(), e().getValue()); };
}
expect(",");
Expr e2 = checkAlignment(readExpr(), location);
@@ -1423,7 +1423,8 @@ Expr ScriptParser::readPrimary() {
expect(")");
seenDataAlign = true;
return [=] {
- return alignTo(script->getDot(), std::max((uint64_t)1, e().getValue()));
+ uint64_t align = std::max(uint64_t(1), e().getValue());
+ return (script->getDot() + align - 1) & -align;
};
}
if (tok == "DATA_SEGMENT_END") {
@@ -1443,7 +1444,7 @@ Expr ScriptParser::readPrimary() {
expect(")");
seenRelroEnd = true;
Expr e = getPageSize();
- return [=] { return alignTo(script->getDot(), e().getValue()); };
+ return [=] { return alignToPowerOf2(script->getDot(), e().getValue()); };
}
if (tok == "DEFINED") {
StringRef name = unquote(readParenLiteral());
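
The rename from alignTo to alignToPowerOf2, and the open-coded expression in DATA_SEGMENT_ALIGN, hinge on the same bit trick: rounding up with a mask is only correct when the alignment is a power of two. A small sketch with illustrative names, not the LLVM declarations:

#include <cstdint>

constexpr uint64_t align_up_pow2(uint64_t value, uint64_t align) {
  return (value + align - 1) & -align;         // requires align == 2^k
}
constexpr uint64_t align_up_any(uint64_t value, uint64_t align) {
  return (value + align - 1) / align * align;  // any non-zero align
}

static_assert(align_up_pow2(0x1003, 0x1000) == 0x2000, "");
static_assert(align_up_pow2(0x1000, 0x1000) == 0x1000, "");
static_assert(align_up_any(10, 6) == 12, "");
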
diff --git a/contrib/llvm-project/lld/ELF/SyntheticSections.cpp b/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
index a0c5e6d04748..919afc7a6e0e 100644
--- a/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
+++ b/contrib/llvm-project/lld/ELF/SyntheticSections.cpp
@@ -498,7 +498,7 @@ void EhFrameSection::iterateFDEWithLSDA(
static void writeCieFde(uint8_t *buf, ArrayRef<uint8_t> d) {
memcpy(buf, d.data(), d.size());
- size_t aligned = alignTo(d.size(), config->wordsize);
+ size_t aligned = alignToPowerOf2(d.size(), config->wordsize);
assert(std::all_of(buf + d.size(), buf + aligned,
[](uint8_t c) { return c == 0; }));
@@ -533,11 +533,11 @@ void EhFrameSection::finalizeContents() {
size_t off = 0;
for (CieRecord *rec : cieRecords) {
rec->cie->outputOff = off;
- off += alignTo(rec->cie->size, config->wordsize);
+ off += alignToPowerOf2(rec->cie->size, config->wordsize);
for (EhSectionPiece *fde : rec->fdes) {
fde->outputOff = off;
- off += alignTo(fde->size, config->wordsize);
+ off += alignToPowerOf2(fde->size, config->wordsize);
}
}
@@ -919,7 +919,7 @@ void MipsGotSection::build() {
for (SectionCommand *cmd : os->commands) {
if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
for (InputSection *isec : isd->sections) {
- uint64_t off = alignTo(secSize, isec->alignment);
+ uint64_t off = alignToPowerOf2(secSize, isec->alignment);
secSize = off + isec->getSize();
}
}
@@ -3330,7 +3330,7 @@ void MergeNoTailSection::finalizeContents() {
for (size_t i = 0; i < numShards; ++i) {
shards[i].finalizeInOrder();
if (shards[i].getSize() > 0)
- off = alignTo(off, alignment);
+ off = alignToPowerOf2(off, alignment);
shardOffsets[i] = off;
off += shards[i].getSize();
}
@@ -3612,7 +3612,7 @@ InputSection *ThunkSection::getTargetInputSection() const {
bool ThunkSection::assignOffsets() {
uint64_t off = 0;
for (Thunk *t : thunks) {
- off = alignTo(off, t->alignment);
+ off = alignToPowerOf2(off, t->alignment);
t->setOffset(off);
uint32_t size = t->size();
t->getThunkTargetSym()->size = size;
diff --git a/contrib/llvm-project/lld/ELF/Writer.cpp b/contrib/llvm-project/lld/ELF/Writer.cpp
index 2994e79cd1de..c9345d812270 100644
--- a/contrib/llvm-project/lld/ELF/Writer.cpp
+++ b/contrib/llvm-project/lld/ELF/Writer.cpp
@@ -2483,7 +2483,7 @@ template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
(prev->p_flags & PF_X) != (p->p_flags & PF_X)) ||
cmd->type == SHT_LLVM_PART_EHDR)
cmd->addrExpr = [] {
- return alignTo(script->getDot(), config->maxPageSize);
+ return alignToPowerOf2(script->getDot(), config->maxPageSize);
};
// PT_TLS is at the start of the first RW PT_LOAD. If `p` includes PT_TLS,
// it must be the RW. Align to p_align(PT_TLS) to make sure
@@ -2500,13 +2500,13 @@ template <class ELFT> void Writer<ELFT>::fixSectionAlignments() {
// blocks correctly. We need to keep the workaround for a while.
else if (Out::tlsPhdr && Out::tlsPhdr->firstSec == p->firstSec)
cmd->addrExpr = [] {
- return alignTo(script->getDot(), config->maxPageSize) +
- alignTo(script->getDot() % config->maxPageSize,
- Out::tlsPhdr->p_align);
+ return alignToPowerOf2(script->getDot(), config->maxPageSize) +
+ alignToPowerOf2(script->getDot() % config->maxPageSize,
+ Out::tlsPhdr->p_align);
};
else
cmd->addrExpr = [] {
- return alignTo(script->getDot(), config->maxPageSize) +
+ return alignToPowerOf2(script->getDot(), config->maxPageSize) +
script->getDot() % config->maxPageSize;
};
}
@@ -2540,7 +2540,7 @@ static uint64_t computeFileOffset(OutputSection *os, uint64_t off) {
// If the section is not in a PT_LOAD, we just have to align it.
if (!os->ptLoad)
- return alignTo(off, os->alignment);
+ return alignToPowerOf2(off, os->alignment);
// If two sections share the same PT_LOAD the file offset is calculated
// using this formula: Off2 = Off1 + (VA2 - VA1).
@@ -2599,15 +2599,15 @@ template <class ELFT> void Writer<ELFT>::assignFileOffsets() {
// following section to avoid loading non-segments parts of the file.
if (config->zSeparate != SeparateSegmentKind::None && lastRX &&
lastRX->lastSec == sec)
- off = alignTo(off, config->maxPageSize);
+ off = alignToPowerOf2(off, config->maxPageSize);
}
for (OutputSection *osec : outputSections)
if (!(osec->flags & SHF_ALLOC)) {
- osec->offset = alignTo(off, osec->alignment);
+ osec->offset = alignToPowerOf2(off, osec->alignment);
off = osec->offset + osec->size;
}
- sectionHeaderOff = alignTo(off, config->wordsize);
+ sectionHeaderOff = alignToPowerOf2(off, config->wordsize);
fileSize = sectionHeaderOff + (outputSections.size() + 1) * sizeof(Elf_Shdr);
// Our logic assumes that sections have rising VA within the same segment.
@@ -2659,8 +2659,9 @@ template <class ELFT> void Writer<ELFT>::setPhdrs(Partition &part) {
// musl/glibc ld.so rounds the size down, so we need to round up
// to protect the last page. This is a no-op on FreeBSD which always
// rounds up.
- p->p_memsz = alignTo(p->p_offset + p->p_memsz, config->commonPageSize) -
- p->p_offset;
+ p->p_memsz =
+ alignToPowerOf2(p->p_offset + p->p_memsz, config->commonPageSize) -
+ p->p_offset;
}
}
}
@@ -2880,8 +2881,9 @@ template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
if (p->p_type == PT_LOAD && (p->p_flags & PF_X))
fillTrap(Out::bufferStart +
alignDown(p->firstSec->offset + p->p_filesz, 4),
- Out::bufferStart + alignTo(p->firstSec->offset + p->p_filesz,
- config->maxPageSize));
+ Out::bufferStart +
+ alignToPowerOf2(p->firstSec->offset + p->p_filesz,
+ config->maxPageSize));
// Round up the file size of the last segment to the page boundary iff it is
// an executable segment to ensure that other tools don't accidentally
@@ -2893,7 +2895,7 @@ template <class ELFT> void Writer<ELFT>::writeTrapInstr() {
if (last && (last->p_flags & PF_X))
last->p_memsz = last->p_filesz =
- alignTo(last->p_filesz, config->maxPageSize);
+ alignToPowerOf2(last->p_filesz, config->maxPageSize);
}
}
diff --git a/contrib/llvm-project/lld/MachO/Driver.cpp b/contrib/llvm-project/lld/MachO/Driver.cpp
index 454708fad4ef..ce2d55bef456 100644
--- a/contrib/llvm-project/lld/MachO/Driver.cpp
+++ b/contrib/llvm-project/lld/MachO/Driver.cpp
@@ -266,7 +266,8 @@ static DenseMap<StringRef, ArchiveFileInfo> loadedArchives;
static InputFile *addFile(StringRef path, LoadType loadType,
bool isLazy = false, bool isExplicit = true,
- bool isBundleLoader = false) {
+ bool isBundleLoader = false,
+ bool isForceHidden = false) {
Optional<MemoryBufferRef> buffer = readFile(path);
if (!buffer)
return nullptr;
@@ -293,7 +294,7 @@ static InputFile *addFile(StringRef path, LoadType loadType,
if (!archive->isEmpty() && !archive->hasSymbolTable())
error(path + ": archive has no index; run ranlib to add one");
- file = make<ArchiveFile>(std::move(archive));
+ file = make<ArchiveFile>(std::move(archive), isForceHidden);
} else {
file = entry->second.file;
// Command-line loads take precedence. If file is previously loaded via
@@ -406,10 +407,12 @@ static InputFile *addFile(StringRef path, LoadType loadType,
}
static void addLibrary(StringRef name, bool isNeeded, bool isWeak,
- bool isReexport, bool isExplicit, LoadType loadType) {
+ bool isReexport, bool isHidden, bool isExplicit,
+ LoadType loadType) {
if (Optional<StringRef> path = findLibrary(name)) {
if (auto *dylibFile = dyn_cast_or_null<DylibFile>(
- addFile(*path, loadType, /*isLazy=*/false, isExplicit))) {
+ addFile(*path, loadType, /*isLazy=*/false, isExplicit,
+ /*isBundleLoader=*/false, isHidden))) {
if (isNeeded)
dylibFile->forceNeeded = true;
if (isWeak)
@@ -473,7 +476,7 @@ void macho::parseLCLinkerOption(InputFile *f, unsigned argc, StringRef data) {
StringRef arg = argv[i];
if (arg.consume_front("-l")) {
addLibrary(arg, /*isNeeded=*/false, /*isWeak=*/false,
- /*isReexport=*/false, /*isExplicit=*/false,
+ /*isReexport=*/false, /*isHidden=*/false, /*isExplicit=*/false,
LoadType::LCLinkerOption);
} else if (arg == "-framework") {
StringRef name = argv[++i];
@@ -1035,12 +1038,19 @@ static void createFiles(const InputArgList &args) {
case OPT_force_load:
addFile(rerootPath(arg->getValue()), LoadType::CommandLineForce);
break;
+ case OPT_load_hidden:
+ addFile(rerootPath(arg->getValue()), LoadType::CommandLine,
+ /*isLazy=*/false, /*isExplicit=*/true, /*isBundleLoader=*/false,
+ /*isForceHidden=*/true);
+ break;
case OPT_l:
case OPT_needed_l:
case OPT_reexport_l:
case OPT_weak_l:
+ case OPT_hidden_l:
addLibrary(arg->getValue(), opt.getID() == OPT_needed_l,
opt.getID() == OPT_weak_l, opt.getID() == OPT_reexport_l,
+ opt.getID() == OPT_hidden_l,
/*isExplicit=*/true, LoadType::CommandLine);
break;
case OPT_framework:
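
-load_hidden <archive> and -hidden-l<name> thread an isForceHidden / isHidden flag down to symbol creation: affected globals still take part in resolution and duplicate checking, but are treated as private extern and never reach the export trie. The scope computation reduces to the expression used later in InputFiles.cpp; a tiny sketch:

#include <cstdint>

constexpr uint8_t kPrivateExternBit = 0x10;  // N_PEXT from <mach-o/nlist.h>

// A symbol is linkage-unit scoped if the object marked it private extern,
// or if its file was loaded with -load_hidden / -hidden-l.
bool isPrivateExtern(uint8_t n_type, bool forceHidden) {
  return (n_type & kPrivateExternBit) || forceHidden;
}
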
diff --git a/contrib/llvm-project/lld/MachO/DriverUtils.cpp b/contrib/llvm-project/lld/MachO/DriverUtils.cpp
index b52d5e851c62..d8e474d15cfd 100644
--- a/contrib/llvm-project/lld/MachO/DriverUtils.cpp
+++ b/contrib/llvm-project/lld/MachO/DriverUtils.cpp
@@ -150,6 +150,7 @@ std::string macho::createResponseFile(const InputArgList &args) {
break;
case OPT_force_load:
case OPT_weak_library:
+ case OPT_load_hidden:
os << arg->getSpelling() << " "
<< quote(rewriteInputPath(arg->getValue())) << "\n";
break;
diff --git a/contrib/llvm-project/lld/MachO/InputFiles.cpp b/contrib/llvm-project/lld/MachO/InputFiles.cpp
index e3bf553e5334..b463d7817594 100644
--- a/contrib/llvm-project/lld/MachO/InputFiles.cpp
+++ b/contrib/llvm-project/lld/MachO/InputFiles.cpp
@@ -768,7 +768,7 @@ void ObjFile::parseRelocations(ArrayRef<SectionHeader> sectionHeaders,
template <class NList>
static macho::Symbol *createDefined(const NList &sym, StringRef name,
InputSection *isec, uint64_t value,
- uint64_t size) {
+ uint64_t size, bool forceHidden) {
// Symbol scope is determined by sym.n_type & (N_EXT | N_PEXT):
// N_EXT: Global symbols. These go in the symbol table during the link,
// and also in the export table of the output so that the dynamic
@@ -787,7 +787,10 @@ static macho::Symbol *createDefined(const NList &sym, StringRef name,
(sym.n_desc & (N_WEAK_DEF | N_WEAK_REF)) == (N_WEAK_DEF | N_WEAK_REF);
if (sym.n_type & N_EXT) {
- bool isPrivateExtern = sym.n_type & N_PEXT;
+ // -load_hidden makes us treat global symbols as linkage unit scoped.
+ // Duplicates are reported but the symbol does not go in the export trie.
+ bool isPrivateExtern = sym.n_type & N_PEXT || forceHidden;
+
// lld's behavior for merging symbols is slightly different from ld64:
// ld64 picks the winning symbol based on several criteria (see
// pickBetweenRegularAtoms() in ld64's SymbolTable.cpp), while lld
@@ -844,11 +847,12 @@ static macho::Symbol *createDefined(const NList &sym, StringRef name,
// InputSection. They cannot be weak.
template <class NList>
static macho::Symbol *createAbsolute(const NList &sym, InputFile *file,
- StringRef name) {
+ StringRef name, bool forceHidden) {
if (sym.n_type & N_EXT) {
+ bool isPrivateExtern = sym.n_type & N_PEXT || forceHidden;
return symtab->addDefined(
name, file, nullptr, sym.n_value, /*size=*/0,
- /*isWeakDef=*/false, sym.n_type & N_PEXT, sym.n_desc & N_ARM_THUMB_DEF,
+ /*isWeakDef=*/false, isPrivateExtern, sym.n_desc & N_ARM_THUMB_DEF,
/*isReferencedDynamically=*/false, sym.n_desc & N_NO_DEAD_STRIP,
/*isWeakDefCanBeHidden=*/false);
}
@@ -864,15 +868,16 @@ template <class NList>
macho::Symbol *ObjFile::parseNonSectionSymbol(const NList &sym,
StringRef name) {
uint8_t type = sym.n_type & N_TYPE;
+ bool isPrivateExtern = sym.n_type & N_PEXT || forceHidden;
switch (type) {
case N_UNDF:
return sym.n_value == 0
? symtab->addUndefined(name, this, sym.n_desc & N_WEAK_REF)
: symtab->addCommon(name, this, sym.n_value,
1 << GET_COMM_ALIGN(sym.n_desc),
- sym.n_type & N_PEXT);
+ isPrivateExtern);
case N_ABS:
- return createAbsolute(sym, this, name);
+ return createAbsolute(sym, this, name, forceHidden);
case N_PBUD:
case N_INDR:
error("TODO: support symbols of type " + std::to_string(type));
@@ -944,7 +949,8 @@ void ObjFile::parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
" at misaligned offset");
continue;
}
- symbols[symIndex] = createDefined(sym, name, isec, 0, isec->getSize());
+ symbols[symIndex] =
+ createDefined(sym, name, isec, 0, isec->getSize(), forceHidden);
}
continue;
}
@@ -979,8 +985,8 @@ void ObjFile::parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
// 4. If we have a literal section (e.g. __cstring and __literal4).
if (!subsectionsViaSymbols || symbolOffset == 0 ||
sym.n_desc & N_ALT_ENTRY || !isa<ConcatInputSection>(isec)) {
- symbols[symIndex] =
- createDefined(sym, name, isec, symbolOffset, symbolSize);
+ symbols[symIndex] = createDefined(sym, name, isec, symbolOffset,
+ symbolSize, forceHidden);
continue;
}
auto *concatIsec = cast<ConcatInputSection>(isec);
@@ -998,8 +1004,8 @@ void ObjFile::parseSymbols(ArrayRef<typename LP::section> sectionHeaders,
// By construction, the symbol will be at offset zero in the new
// subsection.
- symbols[symIndex] =
- createDefined(sym, name, nextIsec, /*value=*/0, symbolSize);
+ symbols[symIndex] = createDefined(sym, name, nextIsec, /*value=*/0,
+ symbolSize, forceHidden);
// TODO: ld64 appears to preserve the original alignment as well as each
// subsection's offset from the last aligned address. We should consider
// emulating that behavior.
@@ -1036,8 +1042,8 @@ OpaqueFile::OpaqueFile(MemoryBufferRef mb, StringRef segName,
}
ObjFile::ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName,
- bool lazy)
- : InputFile(ObjKind, mb, lazy), modTime(modTime) {
+ bool lazy, bool forceHidden)
+ : InputFile(ObjKind, mb, lazy), modTime(modTime), forceHidden(forceHidden) {
this->archiveName = std::string(archiveName);
if (lazy) {
if (target->wordSize == 8)
@@ -2061,26 +2067,27 @@ void DylibFile::checkAppExtensionSafety(bool dylibIsAppExtensionSafe) const {
warn("using '-application_extension' with unsafe dylib: " + toString(this));
}
-ArchiveFile::ArchiveFile(std::unique_ptr<object::Archive> &&f)
- : InputFile(ArchiveKind, f->getMemoryBufferRef()), file(std::move(f)) {}
+ArchiveFile::ArchiveFile(std::unique_ptr<object::Archive> &&f, bool forceHidden)
+ : InputFile(ArchiveKind, f->getMemoryBufferRef()), file(std::move(f)),
+ forceHidden(forceHidden) {}
void ArchiveFile::addLazySymbols() {
for (const object::Archive::Symbol &sym : file->symbols())
symtab->addLazyArchive(sym.getName(), this, sym);
}
-static Expected<InputFile *> loadArchiveMember(MemoryBufferRef mb,
- uint32_t modTime,
- StringRef archiveName,
- uint64_t offsetInArchive) {
+static Expected<InputFile *>
+loadArchiveMember(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName,
+ uint64_t offsetInArchive, bool forceHidden) {
if (config->zeroModTime)
modTime = 0;
switch (identify_magic(mb.getBuffer())) {
case file_magic::macho_object:
- return make<ObjFile>(mb, modTime, archiveName);
+ return make<ObjFile>(mb, modTime, archiveName, /*lazy=*/false, forceHidden);
case file_magic::bitcode:
- return make<BitcodeFile>(mb, archiveName, offsetInArchive);
+ return make<BitcodeFile>(mb, archiveName, offsetInArchive, /*lazy=*/false,
+ forceHidden);
default:
return createStringError(inconvertibleErrorCode(),
mb.getBufferIdentifier() +
@@ -2104,8 +2111,8 @@ Error ArchiveFile::fetch(const object::Archive::Child &c, StringRef reason) {
if (!modTime)
return modTime.takeError();
- Expected<InputFile *> file =
- loadArchiveMember(*mb, toTimeT(*modTime), getName(), c.getChildOffset());
+ Expected<InputFile *> file = loadArchiveMember(
+ *mb, toTimeT(*modTime), getName(), c.getChildOffset(), forceHidden);
if (!file)
return file.takeError();
@@ -2153,7 +2160,8 @@ static macho::Symbol *createBitcodeSymbol(const lto::InputFile::Symbol &objSym,
case GlobalValue::DefaultVisibility:
break;
}
- isPrivateExtern = isPrivateExtern || objSym.canBeOmittedFromSymbolTable();
+ isPrivateExtern = isPrivateExtern || objSym.canBeOmittedFromSymbolTable() ||
+ file.forceHidden;
if (objSym.isCommon())
return symtab->addCommon(name, &file, objSym.getCommonSize(),
@@ -2168,8 +2176,8 @@ static macho::Symbol *createBitcodeSymbol(const lto::InputFile::Symbol &objSym,
}
BitcodeFile::BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
- uint64_t offsetInArchive, bool lazy)
- : InputFile(BitcodeKind, mb, lazy) {
+ uint64_t offsetInArchive, bool lazy, bool forceHidden)
+ : InputFile(BitcodeKind, mb, lazy), forceHidden(forceHidden) {
this->archiveName = std::string(archiveName);
std::string path = mb.getBufferIdentifier().str();
// ThinLTO assumes that all MemoryBufferRefs given to it have a unique
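
The hunks above thread a single forceHidden flag from -load_hidden/-hidden-l down to symbol creation, OR-ing it with the symbol's own N_PEXT bit. The standalone sketch below is not part of the patch (the N_EXT/N_PEXT values mirror <mach-o/nlist.h>, and the helper name is invented); it only illustrates the resulting visibility decision and that the patch's `sym.n_type & N_PEXT || forceHidden` parses as `(sym.n_type & N_PEXT) || forceHidden`, since & binds tighter than ||.

    // Illustrative only -- not lld code.
    #include <cstdint>
    #include <cstdio>

    namespace {
    constexpr uint8_t N_EXT = 0x01;  // global symbol
    constexpr uint8_t N_PEXT = 0x10; // private extern

    // Same shape as the patch's expression: either the symbol already carries
    // N_PEXT, or -load_hidden forces linkage-unit scope for the whole file.
    bool isPrivateExtern(uint8_t n_type, bool forceHidden) {
      return (n_type & N_PEXT) || forceHidden;
    }
    } // namespace

    int main() {
      std::printf("%d\n", isPrivateExtern(N_EXT, /*forceHidden=*/false));          // 0: stays exported
      std::printf("%d\n", isPrivateExtern(N_EXT, /*forceHidden=*/true));           // 1: hidden by -load_hidden
      std::printf("%d\n", isPrivateExtern(N_EXT | N_PEXT, /*forceHidden=*/false)); // 1: already private extern
    }
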
diff --git a/contrib/llvm-project/lld/MachO/InputFiles.h b/contrib/llvm-project/lld/MachO/InputFiles.h
index 5deb05272a6b..ea6802814e4c 100644
--- a/contrib/llvm-project/lld/MachO/InputFiles.h
+++ b/contrib/llvm-project/lld/MachO/InputFiles.h
@@ -156,7 +156,7 @@ struct FDE {
class ObjFile final : public InputFile {
public:
ObjFile(MemoryBufferRef mb, uint32_t modTime, StringRef archiveName,
- bool lazy = false);
+ bool lazy = false, bool forceHidden = false);
ArrayRef<llvm::MachO::data_in_code_entry> getDataInCode() const;
template <class LP> void parse();
@@ -171,6 +171,7 @@ public:
std::unique_ptr<lld::DWARFCache> dwarfCache;
Section *addrSigSection = nullptr;
const uint32_t modTime;
+ bool forceHidden;
std::vector<ConcatInputSection *> debugSections;
std::vector<CallGraphEntry> callGraph;
llvm::DenseMap<ConcatInputSection *, FDE> fdes;
@@ -259,7 +260,8 @@ private:
// .a file
class ArchiveFile final : public InputFile {
public:
- explicit ArchiveFile(std::unique_ptr<llvm::object::Archive> &&file);
+ explicit ArchiveFile(std::unique_ptr<llvm::object::Archive> &&file,
+ bool forceHidden);
void addLazySymbols();
void fetch(const llvm::object::Archive::Symbol &);
// LLD normally doesn't use Error for error-handling, but the underlying
@@ -273,16 +275,20 @@ private:
// Keep track of children fetched from the archive by tracking
// which address offsets have been fetched already.
llvm::DenseSet<uint64_t> seen;
+ // Load all symbols with hidden visibility (-load_hidden).
+ bool forceHidden;
};
class BitcodeFile final : public InputFile {
public:
explicit BitcodeFile(MemoryBufferRef mb, StringRef archiveName,
- uint64_t offsetInArchive, bool lazy = false);
+ uint64_t offsetInArchive, bool lazy = false,
+ bool forceHidden = false);
static bool classof(const InputFile *f) { return f->kind() == BitcodeKind; }
void parse();
std::unique_ptr<llvm::lto::InputFile> obj;
+ bool forceHidden;
private:
void parseLazy();
diff --git a/contrib/llvm-project/lld/MachO/InputSection.cpp b/contrib/llvm-project/lld/MachO/InputSection.cpp
index 76b11d9da4f8..ca073f8ac6f3 100644
--- a/contrib/llvm-project/lld/MachO/InputSection.cpp
+++ b/contrib/llvm-project/lld/MachO/InputSection.cpp
@@ -67,7 +67,7 @@ std::string InputSection::getLocation(uint64_t off) const {
// First, try to find a symbol that's near the offset. Use it as a reference
// point.
if (auto *sym = getContainingSymbol(off))
- return (toString(getFile()) + ":(symbol " + sym->getName() + "+0x" +
+ return (toString(getFile()) + ":(symbol " + toString(*sym) + "+0x" +
Twine::utohexstr(off - sym->value) + ")")
.str();
diff --git a/contrib/llvm-project/lld/MachO/Options.td b/contrib/llvm-project/lld/MachO/Options.td
index b3d74a83f582..064862fb1bb0 100644
--- a/contrib/llvm-project/lld/MachO/Options.td
+++ b/contrib/llvm-project/lld/MachO/Options.td
@@ -240,6 +240,14 @@ def force_load : Separate<["-"], "force_load">,
def force_load_swift_libs : Flag<["-"], "force_load_swift_libs">,
HelpText<"Apply -force_load to libraries listed in LC_LINKER_OPTIONS whose names start with 'swift'">,
Group<grp_libs>;
+def load_hidden : Separate<["-"], "load_hidden">,
+ MetaVarName<"<path>">,
+ HelpText<"Load all symbols from static library with hidden visibility">,
+ Group<grp_libs>;
+def hidden_l : Joined<["-"], "hidden-l">,
+ MetaVarName<"<name>">,
+ HelpText<"Like -l<name>, but load all symbols with hidden visibility">,
+ Group<grp_libs>;
def grp_content : OptionGroup<"content">, HelpText<"ADDITIONAL CONTENT">;
@@ -1174,7 +1182,7 @@ def allow_simulator_linking_to_macosx_dylibs : Flag<["-"], "allow_simulator_link
HelpText<"This option is undocumented in ld64">,
Flags<[HelpHidden]>,
Group<grp_undocumented>;
-def bitcode_process_mode : Flag<["-"], "bitcode_process_mode">,
+def bitcode_process_mode : Separate<["-"], "bitcode_process_mode">,
HelpText<"This option is undocumented in ld64">,
Flags<[HelpHidden]>,
Group<grp_undocumented>;
diff --git a/contrib/llvm-project/lld/MachO/SyntheticSections.h b/contrib/llvm-project/lld/MachO/SyntheticSections.h
index afdd46d8a7de..29c2d98c6625 100644
--- a/contrib/llvm-project/lld/MachO/SyntheticSections.h
+++ b/contrib/llvm-project/lld/MachO/SyntheticSections.h
@@ -70,7 +70,7 @@ public:
// Sections in __LINKEDIT are special: their offsets are recorded in the
// load commands like LC_DYLD_INFO_ONLY and LC_SYMTAB, instead of in section
// headers.
- bool isHidden() const override final { return true; }
+ bool isHidden() const final { return true; }
virtual uint64_t getRawSize() const = 0;
@@ -80,9 +80,7 @@ public:
//
// NOTE: This assumes that the extra bytes required for alignment can be
// zero-valued bytes.
- uint64_t getSize() const override final {
- return llvm::alignTo(getRawSize(), align);
- }
+ uint64_t getSize() const final { return llvm::alignTo(getRawSize(), align); }
};
// The header of the Mach-O file, which must have a file offset of zero.
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h b/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
index 2cb983c40d19..b9ac0a5bca39 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/Disassembler.h
@@ -83,7 +83,10 @@ public:
/// The control flow kind of this instruction, or
/// eInstructionControlFlowKindUnknown if the instruction
/// can't be classified.
- lldb::InstructionControlFlowKind GetControlFlowKind(const ArchSpec &arch);
+ virtual lldb::InstructionControlFlowKind
+ GetControlFlowKind(const ExecutionContext *exe_ctx) {
+ return lldb::eInstructionControlFlowKindUnknown;
+ }
virtual void
CalculateMnemonicOperandsAndComment(const ExecutionContext *exe_ctx) = 0;
@@ -223,6 +226,9 @@ public:
virtual bool IsCall() { return false; }
+ static const char *GetNameForInstructionControlFlowKind(
+ lldb::InstructionControlFlowKind instruction_control_flow_kind);
+
protected:
Address m_address; // The section offset address of this instruction
// We include an address class in the Instruction class to
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
index 28a8acc34632..b082224c38ed 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/MemoryTagManager.h
@@ -113,6 +113,21 @@ public:
UnpackTagsData(const std::vector<uint8_t> &tags,
size_t granules = 0) const = 0;
+ // Unpack tags from a corefile segment containing compressed tags
+ // (compression that may be different from the one used for GDB transport).
+ //
+  // This method assumes that:
+ // * addr and len have been granule aligned by a tag manager
+ // * addr >= tag_segment_virtual_address
+ //
+ // 'reader' will always be a wrapper around a CoreFile in real use
+ // but allows testing without having to mock a CoreFile.
+ typedef std::function<size_t(lldb::offset_t, size_t, void *)> CoreReaderFn;
+ std::vector<lldb::addr_t> virtual UnpackTagsFromCoreFileSegment(
+ CoreReaderFn reader, lldb::addr_t tag_segment_virtual_address,
+ lldb::addr_t tag_segment_data_address, lldb::addr_t addr,
+ size_t len) const = 0;
+
// Pack uncompressed tags into their storage format (e.g. for gdb QMemTags).
// Checks that each tag is within the expected value range.
// We do not check the number of tags or range they apply to because
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Process.h b/contrib/llvm-project/lldb/include/lldb/Target/Process.h
index a55659225ef1..505e211e09b6 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Process.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Process.h
@@ -1715,8 +1715,8 @@ public:
/// an error saying so.
/// If it does, either the memory tags or an error describing a
/// failure to read or unpack them.
- llvm::Expected<std::vector<lldb::addr_t>> ReadMemoryTags(lldb::addr_t addr,
- size_t len);
+ virtual llvm::Expected<std::vector<lldb::addr_t>>
+ ReadMemoryTags(lldb::addr_t addr, size_t len);
/// Write memory tags for a range of memory.
/// (calls DoWriteMemoryTags to do the target specific work)
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/TraceCursor.h b/contrib/llvm-project/lldb/include/lldb/Target/TraceCursor.h
index f6337e3d3d3f..95b022331634 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/TraceCursor.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/TraceCursor.h
@@ -215,7 +215,7 @@ public:
/// of this cursor.
ExecutionContextRef &GetExecutionContextRef();
- /// Instruction, event or error information
+ /// Trace item information (instructions, errors and events)
/// \{
/// \return
@@ -255,27 +255,35 @@ public:
/// The load address of the instruction the cursor is pointing at.
virtual lldb::addr_t GetLoadAddress() const = 0;
- /// Get the hardware counter of a given type associated with the current
- /// instruction. Each architecture might support different counters. It might
- /// happen that only some instructions of an entire trace have a given counter
- /// associated with them.
- ///
- /// \param[in] counter_type
- /// The counter type.
- /// \return
- /// The value of the counter or \b llvm::None if not available.
- virtual llvm::Optional<uint64_t>
- GetCounter(lldb::TraceCounter counter_type) const = 0;
-
/// Get the CPU associated with the current trace item.
///
/// This call might not be O(1), so it's suggested to invoke this method
- /// whenever a cpu change event is fired.
+ /// whenever an eTraceEventCPUChanged event is fired.
///
/// \return
/// The requested CPU id, or \a llvm::None if this information is
/// not available for the current item.
virtual llvm::Optional<lldb::cpu_id_t> GetCPU() const = 0;
+
+ /// Get the last hardware clock value that was emitted before the current
+ /// trace item.
+ ///
+ /// This call might not be O(1), so it's suggested to invoke this method
+ /// whenever an eTraceEventHWClockTick event is fired.
+ ///
+ /// \return
+ /// The requested HW clock value, or \a llvm::None if this information is
+ /// not available for the current item.
+ virtual llvm::Optional<uint64_t> GetHWClock() const = 0;
+
+ /// Get the approximate wall clock time in nanoseconds at which the current
+ /// trace item was executed. Each trace plug-in has a different definition for
+ /// what time 0 means.
+ ///
+ /// \return
+ /// The approximate wall clock time for the trace item, or \a llvm::None
+ /// if not available.
+ virtual llvm::Optional<double> GetWallClockTime() const = 0;
/// \}
protected:
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/TraceDumper.h b/contrib/llvm-project/lldb/include/lldb/Target/TraceDumper.h
index bbc1a55873d7..ada779990e07 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/TraceDumper.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/TraceDumper.h
@@ -29,9 +29,9 @@ struct TraceDumperOptions {
bool json = false;
/// When dumping in JSON format, pretty print the output.
bool pretty_print_json = false;
- /// For each instruction, print the corresponding timestamp counter if
+ /// For each trace item, print the corresponding timestamp in nanoseconds if
/// available.
- bool show_tsc = false;
+ bool show_timestamps = false;
/// Dump the events that happened between instructions.
bool show_events = false;
/// For each instruction, print the instruction kind.
@@ -61,7 +61,8 @@ public:
struct TraceItem {
lldb::user_id_t id;
lldb::addr_t load_address;
- llvm::Optional<uint64_t> tsc;
+ llvm::Optional<double> timestamp;
+ llvm::Optional<uint64_t> hw_clock;
llvm::Optional<llvm::StringRef> error;
llvm::Optional<lldb::TraceEvent> event;
llvm::Optional<SymbolInfo> symbol_info;
diff --git a/contrib/llvm-project/lldb/include/lldb/Utility/TraceIntelPTGDBRemotePackets.h b/contrib/llvm-project/lldb/include/lldb/Utility/TraceIntelPTGDBRemotePackets.h
index bf9409743a6d..5930cd9970e7 100644
--- a/contrib/llvm-project/lldb/include/lldb/Utility/TraceIntelPTGDBRemotePackets.h
+++ b/contrib/llvm-project/lldb/include/lldb/Utility/TraceIntelPTGDBRemotePackets.h
@@ -91,8 +91,8 @@ struct LinuxPerfZeroTscConversion {
/// nanoseconds) is defined by the kernel at boot time and has no particularly
/// useful meaning. On the other hand, this value is constant for an entire
/// trace session.
- // See 'time_zero' section of
- // https://man7.org/linux/man-pages/man2/perf_event_open.2.html
+ /// See 'time_zero' section of
+ /// https://man7.org/linux/man-pages/man2/perf_event_open.2.html
///
/// \param[in] tsc
/// The TSC value to be converted.
diff --git a/contrib/llvm-project/lldb/include/lldb/lldb-enumerations.h b/contrib/llvm-project/lldb/include/lldb/lldb-enumerations.h
index 83b98c37d122..80046e7e6bee 100644
--- a/contrib/llvm-project/lldb/include/lldb/lldb-enumerations.h
+++ b/contrib/llvm-project/lldb/include/lldb/lldb-enumerations.h
@@ -1159,12 +1159,6 @@ enum SaveCoreStyle {
eSaveCoreStackOnly = 3,
};
-// Type of counter values associated with instructions in a trace.
-enum TraceCounter {
- // Timestamp counter, like the one offered by Intel CPUs (TSC).
- eTraceCounterTSC = 0,
-};
-
/// Events that might happen during a trace session.
enum TraceEvent {
/// Tracing was disabled for some time due to a software trigger
@@ -1174,6 +1168,8 @@ enum TraceEvent {
/// Event due to CPU change for a thread. This event is also fired when
/// suddenly it's not possible to identify the cpu of a given thread.
eTraceEventCPUChanged,
+ /// Event due to a CPU HW clock tick
+ eTraceEventHWClockTick,
};
// Enum used to identify which kind of item a \a TraceCursor is pointing at
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
index a11e2b719727..e65e12fe557a 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectDisassemble.cpp
@@ -216,8 +216,7 @@ CommandObjectDisassemble::CommandObjectDisassemble(
"Disassemble specified instructions in the current target. "
"Defaults to the current function for the current thread and "
"stack frame.",
- "disassemble [<cmd-options>]", eCommandRequiresTarget),
- m_options() {}
+ "disassemble [<cmd-options>]", eCommandRequiresTarget) {}
CommandObjectDisassemble::~CommandObjectDisassemble() = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
index 0fb50420f70f..083309121b66 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectExpression.cpp
@@ -187,7 +187,7 @@ CommandObjectExpression::CommandObjectExpression(
m_format_options(eFormatDefault),
m_repl_option(LLDB_OPT_SET_1, false, "repl", 'r', "Drop into REPL", false,
true),
- m_command_options(), m_expr_line_count(0) {
+ m_expr_line_count(0) {
SetHelpLong(
R"(
Single and multi-line expressions:
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
index ca0384cf9453..5051f9aeec85 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectMemory.cpp
@@ -1659,7 +1659,7 @@ class CommandObjectMemoryRegion : public CommandObjectParsed {
public:
class OptionGroupMemoryRegion : public OptionGroup {
public:
- OptionGroupMemoryRegion() : OptionGroup(), m_all(false, false) {}
+ OptionGroupMemoryRegion() : m_all(false, false) {}
~OptionGroupMemoryRegion() override = default;
diff --git a/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp b/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
index 1371b9dbda1e..fe0cb0945cde 100644
--- a/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
+++ b/contrib/llvm-project/lldb/source/Commands/CommandObjectThread.cpp
@@ -2162,7 +2162,7 @@ public:
break;
}
case 't': {
- m_dumper_options.show_tsc = true;
+ m_dumper_options.show_timestamps = true;
break;
}
case 'e': {
diff --git a/contrib/llvm-project/lldb/source/Commands/Options.td b/contrib/llvm-project/lldb/source/Commands/Options.td
index 78221db18e65..cc47db575306 100644
--- a/contrib/llvm-project/lldb/source/Commands/Options.td
+++ b/contrib/llvm-project/lldb/source/Commands/Options.td
@@ -1146,15 +1146,17 @@ let Command = "thread trace dump instructions" in {
Desc<"Dump in simple JSON format.">;
def thread_trace_dump_instructions_pretty_print: Option<"pretty-json", "J">,
Group<1>,
- Desc<"Dump in JSON format but pretty printing the output for easier readability.">;
+ Desc<"Dump in JSON format but pretty printing the output for easier "
+ "readability.">;
def thread_trace_dump_instructions_show_kind : Option<"kind", "k">, Group<1>,
Desc<"Show instruction control flow kind. Refer to the enum "
"`InstructionControlFlowKind` for a list of control flow kind. "
"As an important note, far jumps, far calls and far returns often indicate "
"calls to and from kernel.">;
- def thread_trace_dump_instructions_show_tsc : Option<"tsc", "t">, Group<1>,
- Desc<"For each instruction, print the corresponding timestamp counter if "
- "available.">;
+ def thread_trace_dump_instructions_show_timestamps: Option<"time", "t">,
+ Group<1>,
+ Desc<"For each trace item, print the corresponding wall clock timestamp "
+ "if available.">;
def thread_trace_dump_instructions_show_events : Option<"events", "e">,
Group<1>,
Desc<"Dump the events that happened during the execution of the target.">;
diff --git a/contrib/llvm-project/lldb/source/Core/Disassembler.cpp b/contrib/llvm-project/lldb/source/Core/Disassembler.cpp
index 7a9e214748a7..4c57be44dc9c 100644
--- a/contrib/llvm-project/lldb/source/Core/Disassembler.cpp
+++ b/contrib/llvm-project/lldb/source/Core/Disassembler.cpp
@@ -571,340 +571,36 @@ Instruction::Instruction(const Address &address, AddressClass addr_class)
Instruction::~Instruction() = default;
-namespace x86 {
-
-/// These are the three values deciding instruction control flow kind.
-/// InstructionLengthDecode function decodes an instruction and get this struct.
-///
-/// primary_opcode
-/// Primary opcode of the instruction.
-/// For one-byte opcode instruction, it's the first byte after prefix.
-/// For two- and three-byte opcodes, it's the second byte.
-///
-/// opcode_len
-/// The length of opcode in bytes. Valid opcode lengths are 1, 2, or 3.
-///
-/// modrm
-/// ModR/M byte of the instruction.
-/// Bits[7:6] indicate MOD. Bits[5:3] specify a register and R/M bits[2:0]
-/// may contain a register or specify an addressing mode, depending on MOD.
-struct InstructionOpcodeAndModrm {
- uint8_t primary_opcode;
- uint8_t opcode_len;
- uint8_t modrm;
-};
-
-/// Determine the InstructionControlFlowKind based on opcode and modrm bytes.
-/// Refer to http://ref.x86asm.net/coder.html for the full list of opcode and
-/// instruction set.
-///
-/// \param[in] opcode_and_modrm
-/// Contains primary_opcode byte, its length, and ModR/M byte.
-/// Refer to the struct InstructionOpcodeAndModrm for details.
-///
-/// \return
-/// The control flow kind of the instruction or
-/// eInstructionControlFlowKindOther if the instruction doesn't affect
-/// the control flow of the program.
-lldb::InstructionControlFlowKind
-MapOpcodeIntoControlFlowKind(InstructionOpcodeAndModrm opcode_and_modrm) {
- uint8_t opcode = opcode_and_modrm.primary_opcode;
- uint8_t opcode_len = opcode_and_modrm.opcode_len;
- uint8_t modrm = opcode_and_modrm.modrm;
-
- if (opcode_len > 2)
- return lldb::eInstructionControlFlowKindOther;
-
- if (opcode >= 0x70 && opcode <= 0x7F) {
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindCondJump;
- else
- return lldb::eInstructionControlFlowKindOther;
- }
-
- if (opcode >= 0x80 && opcode <= 0x8F) {
- if (opcode_len == 2)
- return lldb::eInstructionControlFlowKindCondJump;
- else
- return lldb::eInstructionControlFlowKindOther;
- }
-
- switch (opcode) {
- case 0x9A:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindFarCall;
- break;
- case 0xFF:
- if (opcode_len == 1) {
- uint8_t modrm_reg = (modrm >> 3) & 7;
- if (modrm_reg == 2)
- return lldb::eInstructionControlFlowKindCall;
- else if (modrm_reg == 3)
- return lldb::eInstructionControlFlowKindFarCall;
- else if (modrm_reg == 4)
- return lldb::eInstructionControlFlowKindJump;
- else if (modrm_reg == 5)
- return lldb::eInstructionControlFlowKindFarJump;
- }
- break;
- case 0xE8:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindCall;
- break;
- case 0xCD:
- case 0xCC:
- case 0xCE:
- case 0xF1:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindFarCall;
- break;
- case 0xCF:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindFarReturn;
- break;
- case 0xE9:
- case 0xEB:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindJump;
- break;
- case 0xEA:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindFarJump;
- break;
- case 0xE3:
- case 0xE0:
- case 0xE1:
- case 0xE2:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindCondJump;
- break;
- case 0xC3:
- case 0xC2:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindReturn;
- break;
- case 0xCB:
- case 0xCA:
- if (opcode_len == 1)
- return lldb::eInstructionControlFlowKindFarReturn;
- break;
- case 0x05:
- case 0x34:
- if (opcode_len == 2)
- return lldb::eInstructionControlFlowKindFarCall;
- break;
- case 0x35:
- case 0x07:
- if (opcode_len == 2)
- return lldb::eInstructionControlFlowKindFarReturn;
- break;
- case 0x01:
- if (opcode_len == 2) {
- switch (modrm) {
- case 0xc1:
- return lldb::eInstructionControlFlowKindFarCall;
- case 0xc2:
- case 0xc3:
- return lldb::eInstructionControlFlowKindFarReturn;
- default:
- break;
- }
- }
- break;
- default:
- break;
- }
-
- return lldb::eInstructionControlFlowKindOther;
-}
-
-/// Decode an instruction into opcode, modrm and opcode_len.
-/// Refer to http://ref.x86asm.net/coder.html for the instruction bytes layout.
-/// Opcodes in x86 are generally the first byte of instruction, though two-byte
-/// instructions and prefixes exist. ModR/M is the byte following the opcode
-/// and adds additional information for how the instruction is executed.
-///
-/// \param[in] inst_bytes
-/// Raw bytes of the instruction
-///
-///
-/// \param[in] bytes_len
-/// The length of the inst_bytes array.
-///
-/// \param[in] is_exec_mode_64b
-/// If true, the execution mode is 64 bit.
-///
-/// \return
-/// Returns decoded instruction as struct InstructionOpcodeAndModrm, holding
-/// primary_opcode, opcode_len and modrm byte. Refer to the struct definition
-/// for more details.
-/// Otherwise if the given instruction is invalid, returns None.
-llvm::Optional<InstructionOpcodeAndModrm>
-InstructionLengthDecode(const uint8_t *inst_bytes, int bytes_len,
- bool is_exec_mode_64b) {
- int op_idx = 0;
- bool prefix_done = false;
- InstructionOpcodeAndModrm ret = {0, 0, 0};
-
- // In most cases, the primary_opcode is the first byte of the instruction
- // but some instructions have a prefix to be skipped for these calculations.
- // The following mapping is inspired from libipt's instruction decoding logic
- // in `src/pt_ild.c`
- while (!prefix_done) {
- if (op_idx >= bytes_len)
- return llvm::None;
-
- ret.primary_opcode = inst_bytes[op_idx];
- switch (ret.primary_opcode) {
- // prefix_ignore
- case 0x26:
- case 0x2e:
- case 0x36:
- case 0x3e:
- case 0x64:
- case 0x65:
- // prefix_osz, prefix_asz
- case 0x66:
- case 0x67:
- // prefix_lock, prefix_f2, prefix_f3
- case 0xf0:
- case 0xf2:
- case 0xf3:
- op_idx++;
- break;
-
- // prefix_rex
- case 0x40:
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x44:
- case 0x45:
- case 0x46:
- case 0x47:
- case 0x48:
- case 0x49:
- case 0x4a:
- case 0x4b:
- case 0x4c:
- case 0x4d:
- case 0x4e:
- case 0x4f:
- if (is_exec_mode_64b)
- op_idx++;
- else
- prefix_done = true;
- break;
-
- // prefix_vex_c4, c5
- case 0xc5:
- if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
- prefix_done = true;
- break;
- }
-
- ret.opcode_len = 2;
- ret.primary_opcode = inst_bytes[op_idx + 2];
- ret.modrm = inst_bytes[op_idx + 3];
- return ret;
-
- case 0xc4:
- if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
- prefix_done = true;
- break;
- }
- ret.opcode_len = inst_bytes[op_idx + 1] & 0x1f;
- ret.primary_opcode = inst_bytes[op_idx + 3];
- ret.modrm = inst_bytes[op_idx + 4];
- return ret;
-
- // prefix_evex
- case 0x62:
- if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
- prefix_done = true;
- break;
- }
- ret.opcode_len = inst_bytes[op_idx + 1] & 0x03;
- ret.primary_opcode = inst_bytes[op_idx + 4];
- ret.modrm = inst_bytes[op_idx + 5];
- return ret;
-
- default:
- prefix_done = true;
- break;
- }
- } // prefix done
-
- ret.primary_opcode = inst_bytes[op_idx];
- ret.modrm = inst_bytes[op_idx + 1];
- ret.opcode_len = 1;
-
- // If the first opcode is 0F, it's two- or three- byte opcodes.
- if (ret.primary_opcode == 0x0F) {
- ret.primary_opcode = inst_bytes[++op_idx]; // get the next byte
-
- if (ret.primary_opcode == 0x38) {
- ret.opcode_len = 3;
- ret.primary_opcode = inst_bytes[++op_idx]; // get the next byte
- ret.modrm = inst_bytes[op_idx + 1];
- } else if (ret.primary_opcode == 0x3A) {
- ret.opcode_len = 3;
- ret.primary_opcode = inst_bytes[++op_idx];
- ret.modrm = inst_bytes[op_idx + 1];
- } else if ((ret.primary_opcode & 0xf8) == 0x38) {
- ret.opcode_len = 0;
- ret.primary_opcode = inst_bytes[++op_idx];
- ret.modrm = inst_bytes[op_idx + 1];
- } else if (ret.primary_opcode == 0x0F) {
- ret.opcode_len = 3;
- // opcode is 0x0F, no needs to update
- ret.modrm = inst_bytes[op_idx + 1];
- } else {
- ret.opcode_len = 2;
- ret.modrm = inst_bytes[op_idx + 1];
- }
- }
-
- return ret;
-}
-
-lldb::InstructionControlFlowKind GetControlFlowKind(bool is_exec_mode_64b,
- Opcode m_opcode) {
- llvm::Optional<InstructionOpcodeAndModrm> ret = llvm::None;
-
- if (m_opcode.GetOpcodeBytes() == nullptr || m_opcode.GetByteSize() <= 0) {
- // x86_64 and i386 instructions are categorized as Opcode::Type::eTypeBytes
- return lldb::eInstructionControlFlowKindUnknown;
- }
-
- // Opcode bytes will be decoded into primary_opcode, modrm and opcode length.
- // These are the three values deciding instruction control flow kind.
- ret = InstructionLengthDecode((const uint8_t *)m_opcode.GetOpcodeBytes(),
- m_opcode.GetByteSize(), is_exec_mode_64b);
- if (!ret)
- return lldb::eInstructionControlFlowKindUnknown;
- else
- return MapOpcodeIntoControlFlowKind(ret.value());
-}
-
-} // namespace x86
-
-lldb::InstructionControlFlowKind
-Instruction::GetControlFlowKind(const ArchSpec &arch) {
- if (arch.GetTriple().getArch() == llvm::Triple::x86)
- return x86::GetControlFlowKind(/*is_exec_mode_64b=*/false, m_opcode);
- else if (arch.GetTriple().getArch() == llvm::Triple::x86_64)
- return x86::GetControlFlowKind(/*is_exec_mode_64b=*/true, m_opcode);
- else
- return eInstructionControlFlowKindUnknown; // not implemented
-}
-
AddressClass Instruction::GetAddressClass() {
if (m_address_class == AddressClass::eInvalid)
m_address_class = m_address.GetAddressClass();
return m_address_class;
}
+const char *Instruction::GetNameForInstructionControlFlowKind(
+ lldb::InstructionControlFlowKind instruction_control_flow_kind) {
+ switch (instruction_control_flow_kind) {
+ case eInstructionControlFlowKindUnknown:
+ return "unknown";
+ case eInstructionControlFlowKindOther:
+ return "other";
+ case eInstructionControlFlowKindCall:
+ return "call";
+ case eInstructionControlFlowKindReturn:
+ return "return";
+ case eInstructionControlFlowKindJump:
+ return "jump";
+ case eInstructionControlFlowKindCondJump:
+ return "cond jump";
+ case eInstructionControlFlowKindFarCall:
+ return "far call";
+ case eInstructionControlFlowKindFarReturn:
+ return "far return";
+ case eInstructionControlFlowKindFarJump:
+ return "far jump";
+ }
+}
+
void Instruction::Dump(lldb_private::Stream *s, uint32_t max_opcode_byte_size,
bool show_address, bool show_bytes,
bool show_control_flow_kind,
@@ -946,35 +642,10 @@ void Instruction::Dump(lldb_private::Stream *s, uint32_t max_opcode_byte_size,
}
if (show_control_flow_kind) {
- switch (GetControlFlowKind(exe_ctx->GetTargetRef().GetArchitecture())) {
- case eInstructionControlFlowKindUnknown:
- ss.Printf("%-12s", "unknown");
- break;
- case eInstructionControlFlowKindOther:
- ss.Printf("%-12s", "other");
- break;
- case eInstructionControlFlowKindCall:
- ss.Printf("%-12s", "call");
- break;
- case eInstructionControlFlowKindReturn:
- ss.Printf("%-12s", "return");
- break;
- case eInstructionControlFlowKindJump:
- ss.Printf("%-12s", "jump");
- break;
- case eInstructionControlFlowKindCondJump:
- ss.Printf("%-12s", "cond jump");
- break;
- case eInstructionControlFlowKindFarCall:
- ss.Printf("%-12s", "far call");
- break;
- case eInstructionControlFlowKindFarReturn:
- ss.Printf("%-12s", "far return");
- break;
- case eInstructionControlFlowKindFarJump:
- ss.Printf("%-12s", "far jump");
- break;
- }
+ lldb::InstructionControlFlowKind instruction_control_flow_kind =
+ GetControlFlowKind(exe_ctx);
+ ss.Printf("%-12s", GetNameForInstructionControlFlowKind(
+ instruction_control_flow_kind));
}
const size_t opcode_pos = ss.GetSizeOfLastLine();
diff --git a/contrib/llvm-project/lldb/source/Host/common/Host.cpp b/contrib/llvm-project/lldb/source/Host/common/Host.cpp
index f35eb47ff683..4a0f0240bd19 100644
--- a/contrib/llvm-project/lldb/source/Host/common/Host.cpp
+++ b/contrib/llvm-project/lldb/source/Host/common/Host.cpp
@@ -172,7 +172,7 @@ MonitorChildProcessThreadFunction(::pid_t pid,
::sigaction(SIGUSR1, &sigUsr1Action, nullptr);
#endif // __linux__
- while(1) {
+ while (true) {
log = GetLog(LLDBLog::Process);
LLDB_LOG(log, "::waitpid({0}, &status, 0)...", pid);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp b/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
index fb404e985f80..973884283f46 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Disassembler/LLVMC/DisassemblerLLVMC.cpp
@@ -85,6 +85,324 @@ private:
std::unique_ptr<llvm::MCInstPrinter> m_instr_printer_up;
};
+namespace x86 {
+
+/// These are the three values deciding instruction control flow kind.
+/// InstructionLengthDecode function decodes an instruction and get this struct.
+///
+/// primary_opcode
+/// Primary opcode of the instruction.
+/// For one-byte opcode instruction, it's the first byte after prefix.
+/// For two- and three-byte opcodes, it's the second byte.
+///
+/// opcode_len
+/// The length of opcode in bytes. Valid opcode lengths are 1, 2, or 3.
+///
+/// modrm
+/// ModR/M byte of the instruction.
+/// Bits[7:6] indicate MOD. Bits[5:3] specify a register and R/M bits[2:0]
+/// may contain a register or specify an addressing mode, depending on MOD.
+struct InstructionOpcodeAndModrm {
+ uint8_t primary_opcode;
+ uint8_t opcode_len;
+ uint8_t modrm;
+};
+
+/// Determine the InstructionControlFlowKind based on opcode and modrm bytes.
+/// Refer to http://ref.x86asm.net/coder.html for the full list of opcode and
+/// instruction set.
+///
+/// \param[in] opcode_and_modrm
+/// Contains primary_opcode byte, its length, and ModR/M byte.
+/// Refer to the struct InstructionOpcodeAndModrm for details.
+///
+/// \return
+/// The control flow kind of the instruction or
+/// eInstructionControlFlowKindOther if the instruction doesn't affect
+/// the control flow of the program.
+lldb::InstructionControlFlowKind
+MapOpcodeIntoControlFlowKind(InstructionOpcodeAndModrm opcode_and_modrm) {
+ uint8_t opcode = opcode_and_modrm.primary_opcode;
+ uint8_t opcode_len = opcode_and_modrm.opcode_len;
+ uint8_t modrm = opcode_and_modrm.modrm;
+
+ if (opcode_len > 2)
+ return lldb::eInstructionControlFlowKindOther;
+
+ if (opcode >= 0x70 && opcode <= 0x7F) {
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindCondJump;
+ else
+ return lldb::eInstructionControlFlowKindOther;
+ }
+
+ if (opcode >= 0x80 && opcode <= 0x8F) {
+ if (opcode_len == 2)
+ return lldb::eInstructionControlFlowKindCondJump;
+ else
+ return lldb::eInstructionControlFlowKindOther;
+ }
+
+ switch (opcode) {
+ case 0x9A:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindFarCall;
+ break;
+ case 0xFF:
+ if (opcode_len == 1) {
+ uint8_t modrm_reg = (modrm >> 3) & 7;
+ if (modrm_reg == 2)
+ return lldb::eInstructionControlFlowKindCall;
+ else if (modrm_reg == 3)
+ return lldb::eInstructionControlFlowKindFarCall;
+ else if (modrm_reg == 4)
+ return lldb::eInstructionControlFlowKindJump;
+ else if (modrm_reg == 5)
+ return lldb::eInstructionControlFlowKindFarJump;
+ }
+ break;
+ case 0xE8:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindCall;
+ break;
+ case 0xCD:
+ case 0xCC:
+ case 0xCE:
+ case 0xF1:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindFarCall;
+ break;
+ case 0xCF:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindFarReturn;
+ break;
+ case 0xE9:
+ case 0xEB:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindJump;
+ break;
+ case 0xEA:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindFarJump;
+ break;
+ case 0xE3:
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindCondJump;
+ break;
+ case 0xC3:
+ case 0xC2:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindReturn;
+ break;
+ case 0xCB:
+ case 0xCA:
+ if (opcode_len == 1)
+ return lldb::eInstructionControlFlowKindFarReturn;
+ break;
+ case 0x05:
+ case 0x34:
+ if (opcode_len == 2)
+ return lldb::eInstructionControlFlowKindFarCall;
+ break;
+ case 0x35:
+ case 0x07:
+ if (opcode_len == 2)
+ return lldb::eInstructionControlFlowKindFarReturn;
+ break;
+ case 0x01:
+ if (opcode_len == 2) {
+ switch (modrm) {
+ case 0xc1:
+ return lldb::eInstructionControlFlowKindFarCall;
+ case 0xc2:
+ case 0xc3:
+ return lldb::eInstructionControlFlowKindFarReturn;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return lldb::eInstructionControlFlowKindOther;
+}
+
+/// Decode an instruction into opcode, modrm and opcode_len.
+/// Refer to http://ref.x86asm.net/coder.html for the instruction bytes layout.
+/// Opcodes in x86 are generally the first byte of instruction, though two-byte
+/// instructions and prefixes exist. ModR/M is the byte following the opcode
+/// and adds additional information for how the instruction is executed.
+///
+/// \param[in] inst_bytes
+/// Raw bytes of the instruction
+///
+///
+/// \param[in] bytes_len
+/// The length of the inst_bytes array.
+///
+/// \param[in] is_exec_mode_64b
+/// If true, the execution mode is 64 bit.
+///
+/// \return
+/// Returns decoded instruction as struct InstructionOpcodeAndModrm, holding
+/// primary_opcode, opcode_len and modrm byte. Refer to the struct definition
+/// for more details.
+/// Otherwise if the given instruction is invalid, returns None.
+llvm::Optional<InstructionOpcodeAndModrm>
+InstructionLengthDecode(const uint8_t *inst_bytes, int bytes_len,
+ bool is_exec_mode_64b) {
+ int op_idx = 0;
+ bool prefix_done = false;
+ InstructionOpcodeAndModrm ret = {0, 0, 0};
+
+ // In most cases, the primary_opcode is the first byte of the instruction
+ // but some instructions have a prefix to be skipped for these calculations.
+ // The following mapping is inspired from libipt's instruction decoding logic
+ // in `src/pt_ild.c`
+ while (!prefix_done) {
+ if (op_idx >= bytes_len)
+ return llvm::None;
+
+ ret.primary_opcode = inst_bytes[op_idx];
+ switch (ret.primary_opcode) {
+ // prefix_ignore
+ case 0x26:
+ case 0x2e:
+ case 0x36:
+ case 0x3e:
+ case 0x64:
+ case 0x65:
+ // prefix_osz, prefix_asz
+ case 0x66:
+ case 0x67:
+ // prefix_lock, prefix_f2, prefix_f3
+ case 0xf0:
+ case 0xf2:
+ case 0xf3:
+ op_idx++;
+ break;
+
+ // prefix_rex
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x4e:
+ case 0x4f:
+ if (is_exec_mode_64b)
+ op_idx++;
+ else
+ prefix_done = true;
+ break;
+
+ // prefix_vex_c4, c5
+ case 0xc5:
+ if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
+ prefix_done = true;
+ break;
+ }
+
+ ret.opcode_len = 2;
+ ret.primary_opcode = inst_bytes[op_idx + 2];
+ ret.modrm = inst_bytes[op_idx + 3];
+ return ret;
+
+ case 0xc4:
+ if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
+ prefix_done = true;
+ break;
+ }
+ ret.opcode_len = inst_bytes[op_idx + 1] & 0x1f;
+ ret.primary_opcode = inst_bytes[op_idx + 3];
+ ret.modrm = inst_bytes[op_idx + 4];
+ return ret;
+
+ // prefix_evex
+ case 0x62:
+ if (!is_exec_mode_64b && (inst_bytes[op_idx + 1] & 0xc0) != 0xc0) {
+ prefix_done = true;
+ break;
+ }
+ ret.opcode_len = inst_bytes[op_idx + 1] & 0x03;
+ ret.primary_opcode = inst_bytes[op_idx + 4];
+ ret.modrm = inst_bytes[op_idx + 5];
+ return ret;
+
+ default:
+ prefix_done = true;
+ break;
+ }
+ } // prefix done
+
+ ret.primary_opcode = inst_bytes[op_idx];
+ ret.modrm = inst_bytes[op_idx + 1];
+ ret.opcode_len = 1;
+
+  // If the first opcode is 0F, it's a two- or three-byte opcode.
+ if (ret.primary_opcode == 0x0F) {
+ ret.primary_opcode = inst_bytes[++op_idx]; // get the next byte
+
+ if (ret.primary_opcode == 0x38) {
+ ret.opcode_len = 3;
+ ret.primary_opcode = inst_bytes[++op_idx]; // get the next byte
+ ret.modrm = inst_bytes[op_idx + 1];
+ } else if (ret.primary_opcode == 0x3A) {
+ ret.opcode_len = 3;
+ ret.primary_opcode = inst_bytes[++op_idx];
+ ret.modrm = inst_bytes[op_idx + 1];
+ } else if ((ret.primary_opcode & 0xf8) == 0x38) {
+ ret.opcode_len = 0;
+ ret.primary_opcode = inst_bytes[++op_idx];
+ ret.modrm = inst_bytes[op_idx + 1];
+ } else if (ret.primary_opcode == 0x0F) {
+ ret.opcode_len = 3;
+      // opcode is 0x0F, no need to update
+ ret.modrm = inst_bytes[op_idx + 1];
+ } else {
+ ret.opcode_len = 2;
+ ret.modrm = inst_bytes[op_idx + 1];
+ }
+ }
+
+ return ret;
+}
+
+lldb::InstructionControlFlowKind GetControlFlowKind(bool is_exec_mode_64b,
+ Opcode m_opcode) {
+ llvm::Optional<InstructionOpcodeAndModrm> ret = llvm::None;
+
+ if (m_opcode.GetOpcodeBytes() == nullptr || m_opcode.GetByteSize() <= 0) {
+ // x86_64 and i386 instructions are categorized as Opcode::Type::eTypeBytes
+ return lldb::eInstructionControlFlowKindUnknown;
+ }
+
+ // Opcode bytes will be decoded into primary_opcode, modrm and opcode length.
+ // These are the three values deciding instruction control flow kind.
+ ret = InstructionLengthDecode((const uint8_t *)m_opcode.GetOpcodeBytes(),
+ m_opcode.GetByteSize(), is_exec_mode_64b);
+ if (!ret)
+ return lldb::eInstructionControlFlowKindUnknown;
+ else
+ return MapOpcodeIntoControlFlowKind(ret.value());
+}
+
+} // namespace x86
+
class InstructionLLVMC : public lldb_private::Instruction {
public:
InstructionLLVMC(DisassemblerLLVMC &disasm,
@@ -223,6 +541,19 @@ public:
}
}
+ lldb::InstructionControlFlowKind
+ GetControlFlowKind(const lldb_private::ExecutionContext *exe_ctx) override {
+ DisassemblerScope disasm(*this, exe_ctx);
+    if (disasm) {
+ if (disasm->GetArchitecture().GetMachine() == llvm::Triple::x86)
+ return x86::GetControlFlowKind(/*is_64b=*/false, m_opcode);
+ else if (disasm->GetArchitecture().GetMachine() == llvm::Triple::x86_64)
+ return x86::GetControlFlowKind(/*is_64b=*/true, m_opcode);
+ }
+
+ return eInstructionControlFlowKindUnknown;
+ }
+
void CalculateMnemonicOperandsAndComment(
const lldb_private::ExecutionContext *exe_ctx) override {
DataExtractor data;
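
For context on the classifier that this patch moves from Core/Disassembler.cpp into the LLVMC plugin: it reduces an instruction to (primary opcode, opcode length, ModR/M) and looks that tuple up in a fixed table. Below is a deliberately simplified, self-contained sketch of the mapping for a handful of one-byte opcodes; it is illustrative only and is not lld/lldb code.

    #include <cstdint>
    #include <cstdio>

    enum class FlowKind { Other, Call, Return, Jump, CondJump };

    // Mirrors a small subset of MapOpcodeIntoControlFlowKind for opcode_len == 1.
    FlowKind classifyOneByteOpcode(uint8_t opcode, uint8_t modrm) {
      if (opcode >= 0x70 && opcode <= 0x7F)
        return FlowKind::CondJump;                   // Jcc rel8
      switch (opcode) {
      case 0xE8: return FlowKind::Call;              // CALL rel32
      case 0xC2: case 0xC3: return FlowKind::Return; // RET
      case 0xE9: case 0xEB: return FlowKind::Jump;   // JMP rel
      case 0xFF: {                                   // group 5: reg field selects the operation
        uint8_t reg = (modrm >> 3) & 7;
        if (reg == 2) return FlowKind::Call;         // CALL r/m
        if (reg == 4) return FlowKind::Jump;         // JMP r/m
        return FlowKind::Other;
      }
      default: return FlowKind::Other;
      }
    }

    int main() {
      std::printf("%d\n", static_cast<int>(classifyOneByteOpcode(0xE8, 0x00))); // Call
      std::printf("%d\n", static_cast<int>(classifyOneByteOpcode(0xC3, 0x00))); // Return
      std::printf("%d\n", static_cast<int>(classifyOneByteOpcode(0xFF, 0xD0))); // Call (reg field == 2)
    }
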
diff --git a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
index f8443d608ac3..71242925862b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangASTSource.cpp
@@ -191,12 +191,12 @@ TagDecl *ClangASTSource::FindCompleteType(const TagDecl *decl) {
ClangASTImporter::NamespaceMapSP namespace_map =
m_ast_importer_sp->GetNamespaceMap(namespace_context);
- LLDB_LOGV(log, " CTD Inspecting namespace map{0} ({1} entries)",
- namespace_map.get(), namespace_map->size());
-
if (!namespace_map)
return nullptr;
+ LLDB_LOGV(log, " CTD Inspecting namespace map{0} ({1} entries)",
+ namespace_map.get(), namespace_map->size());
+
for (const ClangASTImporter::NamespaceMapItem &item : *namespace_map) {
LLDB_LOG(log, " CTD Searching namespace {0} in module {1}",
item.second.GetName(), item.first->GetFileSpec().GetFilename());
@@ -1430,9 +1430,7 @@ static bool ImportOffsetMap(llvm::DenseMap<const D *, O> &destination_map,
std::vector<PairType> sorted_items;
sorted_items.reserve(source_map.size());
sorted_items.assign(source_map.begin(), source_map.end());
- llvm::sort(sorted_items, [](const PairType &lhs, const PairType &rhs) {
- return lhs.second < rhs.second;
- });
+ llvm::sort(sorted_items, llvm::less_second());
for (const auto &item : sorted_items) {
DeclFromUser<D> user_decl(const_cast<D *>(item.first));
diff --git a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
index fad0f724e4c8..ec3dc28a3a8c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
@@ -722,8 +722,9 @@ ClangExpressionParser::ClangExpressionParser(
m_llvm_context = std::make_unique<LLVMContext>();
m_code_generator.reset(CreateLLVMCodeGen(
m_compiler->getDiagnostics(), module_name,
- m_compiler->getHeaderSearchOpts(), m_compiler->getPreprocessorOpts(),
- m_compiler->getCodeGenOpts(), *m_llvm_context));
+ &m_compiler->getVirtualFileSystem(), m_compiler->getHeaderSearchOpts(),
+ m_compiler->getPreprocessorOpts(), m_compiler->getCodeGenOpts(),
+ *m_llvm_context));
}
ClangExpressionParser::~ClangExpressionParser() = default;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
index 5bc745cf3b8b..b00a17736679 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulateInstructionARM.cpp
@@ -14453,10 +14453,10 @@ bool EmulateInstructionARM::TestEmulation(Stream *out_stream, ArchSpec &arch,
return false;
}
- success = before_state.CompareState(after_state);
+ success = before_state.CompareState(after_state, out_stream);
if (!success)
out_stream->Printf(
- "TestEmulation: 'before' and 'after' states do not match.\n");
+ "TestEmulation: State after emulation does not match 'after' state.\n");
return success;
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.cpp b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.cpp
index 569482c7b23b..da679a3e8547 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.cpp
@@ -97,7 +97,7 @@ uint64_t EmulationStateARM::ReadPseudoRegisterValue(uint32_t reg_num,
uint32_t idx = reg_num - dwarf_d0;
if (idx < 16)
value = (uint64_t)m_vfp_regs.s_regs[idx * 2] |
- ((uint64_t)m_vfp_regs.s_regs[idx * 2 + 1] >> 32);
+ ((uint64_t)m_vfp_regs.s_regs[idx * 2 + 1] << 32);
else
value = m_vfp_regs.d_regs[idx - 16];
} else
@@ -251,27 +251,67 @@ bool EmulationStateARM::WritePseudoRegister(
reg_value.GetAsUInt64());
}
-bool EmulationStateARM::CompareState(EmulationStateARM &other_state) {
+bool EmulationStateARM::CompareState(EmulationStateARM &other_state,
+ Stream *out_stream) {
bool match = true;
for (int i = 0; match && i < 17; ++i) {
- if (m_gpr[i] != other_state.m_gpr[i])
+ if (m_gpr[i] != other_state.m_gpr[i]) {
match = false;
+ out_stream->Printf("r%d: 0x%x != 0x%x\n", i, m_gpr[i],
+ other_state.m_gpr[i]);
+ }
}
for (int i = 0; match && i < 32; ++i) {
- if (m_vfp_regs.s_regs[i] != other_state.m_vfp_regs.s_regs[i])
+ if (m_vfp_regs.s_regs[i] != other_state.m_vfp_regs.s_regs[i]) {
match = false;
+ out_stream->Printf("s%d: 0x%x != 0x%x\n", i, m_vfp_regs.s_regs[i],
+ other_state.m_vfp_regs.s_regs[i]);
+ }
}
for (int i = 0; match && i < 16; ++i) {
- if (m_vfp_regs.d_regs[i] != other_state.m_vfp_regs.d_regs[i])
+ if (m_vfp_regs.d_regs[i] != other_state.m_vfp_regs.d_regs[i]) {
match = false;
+ out_stream->Printf("d%d: 0x%" PRIx64 " != 0x%" PRIx64 "\n", i + 16,
+ m_vfp_regs.d_regs[i],
+ other_state.m_vfp_regs.d_regs[i]);
+ }
+ }
+
+ // other_state is the expected state. If it has memory, check it.
+ if (!other_state.m_memory.empty() && m_memory != other_state.m_memory) {
+ match = false;
+ out_stream->Printf("memory does not match\n");
+ out_stream->Printf("got memory:\n");
+ for (auto p : m_memory)
+ out_stream->Printf("0x%08" PRIx64 ": 0x%08x\n", p.first, p.second);
+ out_stream->Printf("expected memory:\n");
+ for (auto p : other_state.m_memory)
+ out_stream->Printf("0x%08" PRIx64 ": 0x%08x\n", p.first, p.second);
}
return match;
}
+bool EmulationStateARM::LoadRegistersStateFromDictionary(
+ OptionValueDictionary *reg_dict, char kind, int first_reg, int num) {
+ StreamString sstr;
+ for (int i = 0; i < num; ++i) {
+ sstr.Clear();
+ sstr.Printf("%c%d", kind, i);
+ OptionValueSP value_sp =
+ reg_dict->GetValueForKey(ConstString(sstr.GetString()));
+ if (value_sp.get() == nullptr)
+ return false;
+ uint64_t reg_value = value_sp->GetUInt64Value();
+ StorePseudoRegisterValue(first_reg + i, reg_value);
+ }
+
+ return true;
+}
+
bool EmulationStateARM::LoadStateFromDictionary(
OptionValueDictionary *test_data) {
static ConstString memory_key("memory");
@@ -321,18 +361,8 @@ bool EmulationStateARM::LoadStateFromDictionary(
// Load General Registers
OptionValueDictionary *reg_dict = value_sp->GetAsDictionary();
-
- StreamString sstr;
- for (int i = 0; i < 16; ++i) {
- sstr.Clear();
- sstr.Printf("r%d", i);
- ConstString reg_name(sstr.GetString());
- value_sp = reg_dict->GetValueForKey(reg_name);
- if (value_sp.get() == nullptr)
- return false;
- uint64_t reg_value = value_sp->GetUInt64Value();
- StorePseudoRegisterValue(dwarf_r0 + i, reg_value);
- }
+ if (!LoadRegistersStateFromDictionary(reg_dict, 'r', dwarf_r0, 16))
+ return false;
static ConstString cpsr_name("cpsr");
value_sp = reg_dict->GetValueForKey(cpsr_name);
@@ -341,16 +371,13 @@ bool EmulationStateARM::LoadStateFromDictionary(
StorePseudoRegisterValue(dwarf_cpsr, value_sp->GetUInt64Value());
// Load s/d Registers
- for (int i = 0; i < 32; ++i) {
- sstr.Clear();
- sstr.Printf("s%d", i);
- ConstString reg_name(sstr.GetString());
- value_sp = reg_dict->GetValueForKey(reg_name);
- if (value_sp.get() == nullptr)
- return false;
- uint64_t reg_value = value_sp->GetUInt64Value();
- StorePseudoRegisterValue(dwarf_s0 + i, reg_value);
- }
-
- return true;
+  // To prevent a test state from supplying both kinds of registers, with one
+  // overwriting the other, we expect either all S registers or all D
+  // registers, not a mix of the two.
+ bool found_s_registers =
+ LoadRegistersStateFromDictionary(reg_dict, 's', dwarf_s0, 32);
+ bool found_d_registers =
+ LoadRegistersStateFromDictionary(reg_dict, 'd', dwarf_d0, 32);
+
+ return found_s_registers != found_d_registers;
}
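
The one-character change from '>> 32' to '<< 32' in ReadPseudoRegisterValue matters because a VFP D register overlays two consecutive S registers, with the odd-numbered S register supplying the high 32 bits. A minimal sketch of the corrected composition (illustrative, not LLDB code):

    #include <cassert>
    #include <cstdint>

    // d<i> is built from s<2i> (low half) and s<2i+1> (high half).
    uint64_t composeDReg(uint32_t s_even, uint32_t s_odd) {
      return (uint64_t)s_even | ((uint64_t)s_odd << 32);
    }

    int main() {
      // d0 = s1:s0
      assert(composeDReg(0x11111111u, 0x22222222u) == 0x2222222211111111ull);
    }
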
diff --git a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.h b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.h
index 28bc5d98649d..bc885dab9ac7 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Instruction/ARM/EmulationStateARM.h
@@ -36,7 +36,8 @@ public:
bool LoadStateFromDictionary(lldb_private::OptionValueDictionary *test_data);
- bool CompareState(EmulationStateARM &other_state);
+ bool CompareState(EmulationStateARM &other_state,
+ lldb_private::Stream *out_stream);
static size_t
ReadPseudoMemory(lldb_private::EmulateInstruction *instruction, void *baton,
@@ -61,6 +62,10 @@ public:
const lldb_private::RegisterValue &reg_value);
private:
+ bool LoadRegistersStateFromDictionary(
+ lldb_private::OptionValueDictionary *reg_dict, char kind, int first_reg,
+ int num);
+
uint32_t m_gpr[17] = {0};
struct _sd_regs {
uint32_t s_regs[32]; // sregs 0 - 31 & dregs 0 - 15
diff --git a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
index acb131b8a775..c396cb061c01 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/Minidump/MinidumpFileBuilder.cpp
@@ -348,7 +348,7 @@ llvm::support::ulittle64_t read_register_u64(RegisterContext *reg_ctx,
lldb_private::minidump::MinidumpContext_x86_64
GetThreadContext_64(RegisterContext *reg_ctx) {
- lldb_private::minidump::MinidumpContext_x86_64 thread_context;
+ lldb_private::minidump::MinidumpContext_x86_64 thread_context = {};
thread_context.p1_home = {};
thread_context.context_flags = static_cast<uint32_t>(
lldb_private::minidump::MinidumpContext_x86_64_Flags::x86_64_Flag |
@@ -534,7 +534,7 @@ Status MinidumpFileBuilder::AddException(const lldb::ProcessSP &process_sp) {
helper_data.AppendData(
&thread_context, sizeof(lldb_private::minidump::MinidumpContext_x86_64));
- Exception exp_record;
+ Exception exp_record = {};
exp_record.ExceptionCode =
static_cast<llvm::support::ulittle32_t>(stop_info_sp->GetValue());
exp_record.ExceptionFlags = static_cast<llvm::support::ulittle32_t>(0);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
index b71de4cadb18..7e25bc4ea2a2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.cpp
@@ -247,6 +247,71 @@ MemoryTagManagerAArch64MTE::UnpackTagsData(const std::vector<uint8_t> &tags,
return unpacked;
}
+std::vector<lldb::addr_t>
+MemoryTagManagerAArch64MTE::UnpackTagsFromCoreFileSegment(
+ CoreReaderFn reader, lldb::addr_t tag_segment_virtual_address,
+ lldb::addr_t tag_segment_data_address, lldb::addr_t addr,
+ size_t len) const {
+ // We can assume by now that addr and len have been granule aligned by a tag
+ // manager. However because we have 2 tags per byte we need to round the range
+ // up again to align to 2 granule boundaries.
+ const size_t granule = GetGranuleSize();
+ const size_t two_granules = granule * 2;
+ lldb::addr_t aligned_addr = addr;
+ size_t aligned_len = len;
+
+ // First align the start address down.
+ if (aligned_addr % two_granules) {
+ assert(aligned_addr % two_granules == granule);
+ aligned_addr -= granule;
+ aligned_len += granule;
+ }
+
+ // Then align the length up.
+ bool aligned_length_up = false;
+ if (aligned_len % two_granules) {
+ assert(aligned_len % two_granules == granule);
+ aligned_len += granule;
+ aligned_length_up = true;
+ }
+
+ // ProcessElfCore should have validated this when it found the segment.
+ assert(aligned_addr >= tag_segment_virtual_address);
+
+ // By now we know that aligned_addr is aligned to a 2 granule boundary.
+ const size_t offset_granules =
+ (aligned_addr - tag_segment_virtual_address) / granule;
+ // 2 tags per byte.
+ const size_t file_offset_in_bytes = offset_granules / 2;
+
+ // By now we know that aligned_len is at least 2 granules.
+ const size_t tag_bytes_to_read = aligned_len / granule / 2;
+ std::vector<uint8_t> tag_data(tag_bytes_to_read);
+ const size_t bytes_copied =
+ reader(tag_segment_data_address + file_offset_in_bytes, tag_bytes_to_read,
+ tag_data.data());
+ UNUSED_IF_ASSERT_DISABLED(bytes_copied);
+ assert(bytes_copied == tag_bytes_to_read);
+
+ std::vector<lldb::addr_t> tags;
+ tags.reserve(2 * tag_data.size());
+ // No need to check the range of the tag value here as each occupies only 4
+ // bits.
+ for (auto tag_byte : tag_data) {
+ tags.push_back(tag_byte & 0xf);
+ tags.push_back(tag_byte >> 4);
+ }
+
+ // If we aligned the address down, don't return the extra first tag.
+ if (addr != aligned_addr)
+ tags.erase(tags.begin());
+ // If we aligned the length up, don't return the extra last tag.
+ if (aligned_length_up)
+ tags.pop_back();
+
+ return tags;
+}
+
llvm::Expected<std::vector<uint8_t>> MemoryTagManagerAArch64MTE::PackTags(
const std::vector<lldb::addr_t> &tags) const {
std::vector<uint8_t> packed;
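The new UnpackTagsFromCoreFileSegment above widens the requested range to a 2-granule boundary, reads one packed byte per two granules, and then drops the extra leading or trailing tag it did not actually need. A minimal standalone sketch of that byte-to-tag expansion, using hypothetical tag bytes rather than a real core file:

#include <cstdint>
#include <cstdio>
#include <vector>

// One 4-bit MTE tag per 16-byte granule, two tags packed per byte: the low
// nibble covers the first granule of the pair, the high nibble the second.
int main() {
  std::vector<uint8_t> tag_data = {0x21, 0x43}; // hypothetical packed tags
  std::vector<uint64_t> tags;
  tags.reserve(2 * tag_data.size());
  for (uint8_t tag_byte : tag_data) {
    tags.push_back(tag_byte & 0xf); // -> 1, then 3
    tags.push_back(tag_byte >> 4);  // -> 2, then 4
  }
  // If the start address was aligned down by one granule, the first unpacked
  // tag is surplus; if the length was rounded up, so is the last one.
  bool aligned_addr_down = true, aligned_len_up = false;
  if (aligned_addr_down)
    tags.erase(tags.begin());
  if (aligned_len_up)
    tags.pop_back();
  for (uint64_t t : tags)
    printf("tag %llu\n", (unsigned long long)t);
  return 0;
}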
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
index 7cda728b140f..365e176e5b1d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/MemoryTagManagerAArch64MTE.h
@@ -44,6 +44,12 @@ public:
UnpackTagsData(const std::vector<uint8_t> &tags,
size_t granules = 0) const override;
+ std::vector<lldb::addr_t>
+ UnpackTagsFromCoreFileSegment(CoreReaderFn reader,
+ lldb::addr_t tag_segment_virtual_address,
+ lldb::addr_t tag_segment_data_address,
+ lldb::addr_t addr, size_t len) const override;
+
llvm::Expected<std::vector<uint8_t>>
PackTags(const std::vector<lldb::addr_t> &tags) const override;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterContextDarwin_arm64.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterContextDarwin_arm64.cpp
index 11b300bc44fb..691e7db3fc79 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterContextDarwin_arm64.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/RegisterContextDarwin_arm64.cpp
@@ -95,7 +95,7 @@ static size_t k_num_register_infos =
RegisterContextDarwin_arm64::RegisterContextDarwin_arm64(
Thread &thread, uint32_t concrete_frame_idx)
- : RegisterContext(thread, concrete_frame_idx), gpr(), fpu(), exc() {
+ : RegisterContext(thread, concrete_frame_idx), gpr(), fpu(), exc(), dbg() {
uint32_t i;
for (i = 0; i < kNumErrors; i++) {
gpr_errs[i] = -1;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/ThreadMemory.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/ThreadMemory.cpp
index 7469e7633e71..89ecc757a68f 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/Utility/ThreadMemory.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/Utility/ThreadMemory.cpp
@@ -23,7 +23,8 @@ using namespace lldb_private;
ThreadMemory::ThreadMemory(Process &process, tid_t tid,
const ValueObjectSP &thread_info_valobj_sp)
: Thread(process, tid), m_backing_thread_sp(),
- m_thread_info_valobj_sp(thread_info_valobj_sp), m_name(), m_queue() {}
+ m_thread_info_valobj_sp(thread_info_valobj_sp), m_name(), m_queue(),
+ m_register_data_addr(LLDB_INVALID_ADDRESS) {}
ThreadMemory::ThreadMemory(Process &process, lldb::tid_t tid,
llvm::StringRef name, llvm::StringRef queue,
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
index 58b4fe3add1b..24d3c4bd0ba2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp
@@ -144,6 +144,18 @@ lldb::addr_t ProcessElfCore::AddAddressRangeFromLoadSegment(
return addr;
}
+lldb::addr_t ProcessElfCore::AddAddressRangeFromMemoryTagSegment(
+ const elf::ELFProgramHeader &header) {
+ // If lldb understood multiple kinds of tag segments we would record the type
+ // of the segment here also. As long as there is only 1 type lldb looks for,
+ // there is no need.
+ FileRange file_range(header.p_offset, header.p_filesz);
+ m_core_tag_ranges.Append(
+ VMRangeToFileOffset::Entry(header.p_vaddr, header.p_memsz, file_range));
+
+ return header.p_vaddr;
+}
+
// Process Control
Status ProcessElfCore::DoLoadCore() {
Status error;
@@ -170,9 +182,12 @@ Status ProcessElfCore::DoLoadCore() {
bool ranges_are_sorted = true;
lldb::addr_t vm_addr = 0;
+ lldb::addr_t tag_addr = 0;
/// Walk through segments and Thread and Address Map information.
/// PT_NOTE - Contains Thread and Register information
/// PT_LOAD - Contains a contiguous range of Process Address Space
+ /// PT_AARCH64_MEMTAG_MTE - Contains AArch64 MTE memory tags for a range of
+ /// Process Address Space.
for (const elf::ELFProgramHeader &H : segments) {
DataExtractor data = core->GetSegmentData(H);
@@ -187,12 +202,18 @@ Status ProcessElfCore::DoLoadCore() {
if (vm_addr > last_addr)
ranges_are_sorted = false;
vm_addr = last_addr;
+ } else if (H.p_type == llvm::ELF::PT_AARCH64_MEMTAG_MTE) {
+ lldb::addr_t last_addr = AddAddressRangeFromMemoryTagSegment(H);
+ if (tag_addr > last_addr)
+ ranges_are_sorted = false;
+ tag_addr = last_addr;
}
}
if (!ranges_are_sorted) {
m_core_aranges.Sort();
m_core_range_infos.Sort();
+ m_core_tag_ranges.Sort();
}
// Even if the architecture is set in the target, we need to override it to
@@ -310,6 +331,15 @@ Status ProcessElfCore::DoGetMemoryRegionInfo(lldb::addr_t load_addr,
? MemoryRegionInfo::eYes
: MemoryRegionInfo::eNo);
region_info.SetMapped(MemoryRegionInfo::eYes);
+
+ // A region is memory tagged if there is a memory tag segment that covers
+ // the exact same range.
+ region_info.SetMemoryTagged(MemoryRegionInfo::eNo);
+ const VMRangeToFileOffset::Entry *tag_entry =
+ m_core_tag_ranges.FindEntryStartsAt(permission_entry->GetRangeBase());
+ if (tag_entry &&
+ tag_entry->GetRangeEnd() == permission_entry->GetRangeEnd())
+ region_info.SetMemoryTagged(MemoryRegionInfo::eYes);
} else if (load_addr < permission_entry->GetRangeBase()) {
region_info.GetRange().SetRangeBase(load_addr);
region_info.GetRange().SetRangeEnd(permission_entry->GetRangeBase());
@@ -317,6 +347,7 @@ Status ProcessElfCore::DoGetMemoryRegionInfo(lldb::addr_t load_addr,
region_info.SetWritable(MemoryRegionInfo::eNo);
region_info.SetExecutable(MemoryRegionInfo::eNo);
region_info.SetMapped(MemoryRegionInfo::eNo);
+ region_info.SetMemoryTagged(MemoryRegionInfo::eNo);
}
return Status();
}
@@ -327,6 +358,7 @@ Status ProcessElfCore::DoGetMemoryRegionInfo(lldb::addr_t load_addr,
region_info.SetWritable(MemoryRegionInfo::eNo);
region_info.SetExecutable(MemoryRegionInfo::eNo);
region_info.SetMapped(MemoryRegionInfo::eNo);
+ region_info.SetMemoryTagged(MemoryRegionInfo::eNo);
return Status();
}
@@ -376,6 +408,38 @@ size_t ProcessElfCore::DoReadMemory(lldb::addr_t addr, void *buf, size_t size,
return bytes_copied;
}
+llvm::Expected<std::vector<lldb::addr_t>>
+ProcessElfCore::ReadMemoryTags(lldb::addr_t addr, size_t len) {
+ ObjectFile *core_objfile = m_core_module_sp->GetObjectFile();
+ if (core_objfile == nullptr)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "No core object file.");
+
+ llvm::Expected<const MemoryTagManager *> tag_manager_or_err =
+ GetMemoryTagManager();
+ if (!tag_manager_or_err)
+ return tag_manager_or_err.takeError();
+
+ // LLDB only supports AArch64 MTE tag segments so we do not need to worry
+ // about the segment type here. If you got here then you must have a tag
+ // manager (meaning you are debugging AArch64) and all the segments in this
+ // list will have had type PT_AARCH64_MEMTAG_MTE.
+ const VMRangeToFileOffset::Entry *tag_entry =
+ m_core_tag_ranges.FindEntryThatContains(addr);
+ // If we don't have a tag segment or the range asked for extends outside the
+ // segment.
+ if (!tag_entry || (addr + len) >= tag_entry->GetRangeEnd())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "No tag segment that covers this range.");
+
+ const MemoryTagManager *tag_manager = *tag_manager_or_err;
+ return tag_manager->UnpackTagsFromCoreFileSegment(
+ [core_objfile](lldb::offset_t offset, size_t length, void *dst) {
+ return core_objfile->CopyData(offset, length, dst);
+ },
+ tag_entry->GetRangeBase(), tag_entry->data.GetRangeBase(), addr, len);
+}
+
void ProcessElfCore::Clear() {
m_thread_list.Clear();
@@ -610,9 +674,9 @@ llvm::Error ProcessElfCore::parseNetBSDNotes(llvm::ArrayRef<CoreNote> notes) {
// To be extracted from struct netbsd_elfcore_procinfo
// Used to sanity check of the LWPs of the process
uint32_t nlwps = 0;
- uint32_t signo; // killing signal
- uint32_t siglwp; // LWP target of killing signal
- uint32_t pr_pid;
+ uint32_t signo = 0; // killing signal
+ uint32_t siglwp = 0; // LWP target of killing signal
+ uint32_t pr_pid = 0;
for (const auto &note : notes) {
llvm::StringRef name = note.info.n_name;
@@ -764,7 +828,7 @@ llvm::Error ProcessElfCore::parseNetBSDNotes(llvm::ArrayRef<CoreNote> notes) {
}
llvm::Error ProcessElfCore::parseOpenBSDNotes(llvm::ArrayRef<CoreNote> notes) {
- ThreadData thread_data;
+ ThreadData thread_data = {};
for (const auto &note : notes) {
// OpenBSD per-thread information is stored in notes named "OpenBSD@nnn" so
// match on the initial part of the string.
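ReadMemoryTags above hands the tag manager a callback that copies packed tag bytes out of the core object file, so the unpacking code never depends on where those bytes live. A small sketch of that callback shape with an in-memory buffer standing in for the ObjectFile; the CoreReaderFn signature here is inferred from how the lambda is used above, not quoted from the lldb headers:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

// Assumed reader shape: copy `length` bytes starting at `offset` into `dst`
// and return how many bytes were actually copied.
using CoreReaderFn =
    std::function<size_t(uint64_t offset, size_t length, void *dst)>;

int main() {
  // Hypothetical contents of a PT_AARCH64_MEMTAG_MTE segment.
  std::vector<uint8_t> segment_bytes = {0x21, 0x43, 0x65};

  CoreReaderFn reader = [&](uint64_t offset, size_t length,
                            void *dst) -> size_t {
    if (offset >= segment_bytes.size())
      return 0;
    size_t available = segment_bytes.size() - static_cast<size_t>(offset);
    size_t n = std::min(length, available);
    std::memcpy(dst, segment_bytes.data() + offset, n);
    return n;
  };

  // The tag manager asks for the byte covering a pair of granules at some
  // offset into the segment, much like the lambda above does through
  // ObjectFile::CopyData.
  uint8_t packed = 0;
  reader(/*offset=*/1, /*length=*/1, &packed); // packed now holds two tags
  return 0;
}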
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h b/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
index fd36e5027816..03c23378e3c1 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/elf-core/ProcessElfCore.h
@@ -86,6 +86,11 @@ public:
size_t DoReadMemory(lldb::addr_t addr, void *buf, size_t size,
lldb_private::Status &error) override;
+ // We do not implement DoReadMemoryTags. Instead all the work is done in
+ // ReadMemoryTags which avoids having to unpack and repack tags.
+ llvm::Expected<std::vector<lldb::addr_t>> ReadMemoryTags(lldb::addr_t addr,
+ size_t len) override;
+
lldb::addr_t GetImageInfoAddress() override;
lldb_private::ArchSpec GetArchitecture();
@@ -105,6 +110,8 @@ protected:
DoGetMemoryRegionInfo(lldb::addr_t load_addr,
lldb_private::MemoryRegionInfo &region_info) override;
+ bool SupportsMemoryTagging() override { return !m_core_tag_ranges.IsEmpty(); }
+
private:
struct NT_FILE_Entry {
lldb::addr_t start;
@@ -139,6 +146,9 @@ private:
// Permissions for all ranges
VMRangeToPermissions m_core_range_infos;
+ // Memory tag ranges found in the core
+ VMRangeToFileOffset m_core_tag_ranges;
+
// NT_FILE entries found from the NOTE segment
std::vector<NT_FILE_Entry> m_nt_file_entries;
@@ -154,6 +164,10 @@ private:
lldb::addr_t
AddAddressRangeFromLoadSegment(const elf::ELFProgramHeader &header);
+ // Parse a contiguous address range from a memory tag segment
+ lldb::addr_t
+ AddAddressRangeFromMemoryTagSegment(const elf::ELFProgramHeader &header);
+
llvm::Expected<std::vector<lldb_private::CoreNote>>
parseSegment(const lldb_private::DataExtractor &segment);
llvm::Error parseFreeBSDNotes(llvm::ArrayRef<lldb_private::CoreNote> notes);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
index c44ace96dd55..580cdde57d80 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.cpp
@@ -4265,3 +4265,21 @@ bool GDBRemoteCommunicationClient::UsesNativeSignals() {
// check whether it is an old version of lldb-server.
return GetThreadSuffixSupported();
}
+
+llvm::Expected<int> GDBRemoteCommunicationClient::KillProcess(lldb::pid_t pid) {
+ StringExtractorGDBRemote response;
+ GDBRemoteCommunication::ScopedTimeout(*this, seconds(3));
+
+ if (SendPacketAndWaitForResponse("k", response, GetPacketTimeout()) !=
+ PacketResult::Success)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "failed to send k packet");
+
+ char packet_cmd = response.GetChar(0);
+ if (packet_cmd == 'W' || packet_cmd == 'X')
+ return response.GetHexU8();
+
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "unexpected response to k packet: %s",
+ response.GetStringRef().str().c_str());
+}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
index d367f75cee0e..3d838d6d8074 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationClient.h
@@ -521,6 +521,8 @@ public:
bool GetSaveCoreSupported() const;
+ llvm::Expected<int> KillProcess(lldb::pid_t pid);
+
protected:
LazyBool m_supports_not_sending_acks = eLazyBoolCalculate;
LazyBool m_supports_thread_suffix = eLazyBoolCalculate;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
index 5f18706f67e5..3e1a6fb6620a 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -2394,7 +2394,6 @@ Status ProcessGDBRemote::DoDetach(bool keep_stopped) {
}
Status ProcessGDBRemote::DoDestroy() {
- Status error;
Log *log = GetLog(GDBRLog::Process);
LLDB_LOGF(log, "ProcessGDBRemote::DoDestroy()");
@@ -2404,54 +2403,35 @@ Status ProcessGDBRemote::DoDestroy() {
if (m_gdb_comm.IsConnected()) {
if (m_public_state.GetValue() != eStateAttaching) {
- StringExtractorGDBRemote response;
- GDBRemoteCommunication::ScopedTimeout(m_gdb_comm,
- std::chrono::seconds(3));
-
- if (m_gdb_comm.SendPacketAndWaitForResponse("k", response,
- GetInterruptTimeout()) ==
- GDBRemoteCommunication::PacketResult::Success) {
- char packet_cmd = response.GetChar(0);
+ llvm::Expected<int> kill_res = m_gdb_comm.KillProcess(GetID());
- if (packet_cmd == 'W' || packet_cmd == 'X') {
+ if (kill_res) {
+ exit_status = kill_res.get();
#if defined(__APPLE__)
- // For Native processes on Mac OS X, we launch through the Host
- // Platform, then hand the process off to debugserver, which becomes
- // the parent process through "PT_ATTACH". Then when we go to kill
- // the process on Mac OS X we call ptrace(PT_KILL) to kill it, then
- // we call waitpid which returns with no error and the correct
- // status. But amusingly enough that doesn't seem to actually reap
- // the process, but instead it is left around as a Zombie. Probably
- // the kernel is in the process of switching ownership back to lldb
- // which was the original parent, and gets confused in the handoff.
- // Anyway, so call waitpid here to finally reap it.
- PlatformSP platform_sp(GetTarget().GetPlatform());
- if (platform_sp && platform_sp->IsHost()) {
- int status;
- ::pid_t reap_pid;
- reap_pid = waitpid(GetID(), &status, WNOHANG);
- LLDB_LOGF(log, "Reaped pid: %d, status: %d.\n", reap_pid, status);
- }
-#endif
- SetLastStopPacket(response);
- ClearThreadIDList();
- exit_status = response.GetHexU8();
- } else {
- LLDB_LOGF(log,
- "ProcessGDBRemote::DoDestroy - got unexpected response "
- "to k packet: %s",
- response.GetStringRef().data());
- exit_string.assign("got unexpected response to k packet: ");
- exit_string.append(std::string(response.GetStringRef()));
+ // For Native processes on Mac OS X, we launch through the Host
+ // Platform, then hand the process off to debugserver, which becomes
+ // the parent process through "PT_ATTACH". Then when we go to kill
+ // the process on Mac OS X we call ptrace(PT_KILL) to kill it, then
+ // we call waitpid which returns with no error and the correct
+ // status. But amusingly enough that doesn't seem to actually reap
+ // the process, but instead it is left around as a Zombie. Probably
+ // the kernel is in the process of switching ownership back to lldb
+ // which was the original parent, and gets confused in the handoff.
+ // Anyway, so call waitpid here to finally reap it.
+ PlatformSP platform_sp(GetTarget().GetPlatform());
+ if (platform_sp && platform_sp->IsHost()) {
+ int status;
+ ::pid_t reap_pid;
+ reap_pid = waitpid(GetID(), &status, WNOHANG);
+ LLDB_LOGF(log, "Reaped pid: %d, status: %d.\n", reap_pid, status);
}
+#endif
+ ClearThreadIDList();
+ exit_string.assign("killed");
} else {
- LLDB_LOGF(log, "ProcessGDBRemote::DoDestroy - failed to send k packet");
- exit_string.assign("failed to send the k packet");
+ exit_string.assign(llvm::toString(kill_res.takeError()));
}
} else {
- LLDB_LOGF(log,
- "ProcessGDBRemote::DoDestroy - killed or interrupted while "
- "attaching");
exit_string.assign("killed or interrupted while attaching.");
}
} else {
@@ -2465,7 +2445,7 @@ Status ProcessGDBRemote::DoDestroy() {
StopAsyncThread();
KillDebugserverProcess();
- return error;
+ return Status();
}
void ProcessGDBRemote::SetLastStopPacket(
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
index c91c111d8df3..64219e1a960b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
@@ -233,7 +233,8 @@ ProcessMinidump::ProcessMinidump(lldb::TargetSP target_sp,
const FileSpec &core_file,
DataBufferSP core_data)
: PostMortemProcess(target_sp, listener_sp), m_core_file(core_file),
- m_core_data(std::move(core_data)), m_is_wow64(false) {}
+ m_core_data(std::move(core_data)), m_active_exception(nullptr),
+ m_is_wow64(false) {}
ProcessMinidump::~ProcessMinidump() {
Clear();
diff --git a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbUtil.cpp b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbUtil.cpp
index 6317b140f7e8..7d730ecdd1f3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbUtil.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/SymbolFile/NativePDB/PdbUtil.cpp
@@ -475,7 +475,7 @@ llvm::StringRef lldb_private::npdb::DropNameScope(llvm::StringRef name) {
}
VariableInfo lldb_private::npdb::GetVariableNameInfo(CVSymbol sym) {
- VariableInfo result;
+ VariableInfo result = {};
if (sym.kind() == S_REGREL32) {
RegRelativeSym reg(SymbolRecordKind::RegRelativeSym);
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.cpp
index 0859c5a20b7e..02f1d2f24d8c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.cpp
@@ -44,11 +44,65 @@ void IntelPTError::log(llvm::raw_ostream &OS) const {
OS << formatv(": {0:x+16}", m_address);
}
-int64_t DecodedThread::GetItemsCount() const {
- return static_cast<int64_t>(m_item_kinds.size());
+bool DecodedThread::TSCRange::InRange(uint64_t item_index) const {
+ return item_index >= first_item_index &&
+ item_index < first_item_index + items_count;
+}
+
+bool DecodedThread::NanosecondsRange::InRange(uint64_t item_index) const {
+ return item_index >= first_item_index &&
+ item_index < first_item_index + items_count;
+}
+
+double DecodedThread::NanosecondsRange::GetInterpolatedTime(
+ uint64_t item_index, uint64_t begin_of_time_nanos,
+ const LinuxPerfZeroTscConversion &tsc_conversion) const {
+ uint64_t items_since_last_tsc = item_index - first_item_index;
+
+ auto interpolate = [&](uint64_t next_range_start_ns) {
+ if (next_range_start_ns == nanos) {
+ // If the resolution of the conversion formula is bad enough to consider
+ // these two timestamps as equal, then we just increase the next one by 1
+ // for correction
+ next_range_start_ns++;
+ }
+ long double item_duration =
+ static_cast<long double>(items_count) / (next_range_start_ns - nanos);
+ return (nanos - begin_of_time_nanos) + items_since_last_tsc * item_duration;
+ };
+
+ if (!next_range) {
+    // If this is the last TSC range, we have to extrapolate. In this case,
+ // we assume that each instruction took one TSC, which is what an
+ // instruction would take if no parallelism is achieved and the frequency
+ // multiplier is 1.
+ return interpolate(tsc_conversion.ToNanos(tsc + items_count));
+ }
+ if (items_count < (next_range->tsc - tsc)) {
+    // If the number of items in this range is less than the total TSC duration
+ // of this range, i.e. each instruction taking longer than 1 TSC, then we
+ // can assume that something else happened between these TSCs (e.g. a
+ // context switch, change to kernel, decoding errors, etc). In this case, we
+ // also assume that each instruction took 1 TSC. A proper way to improve
+    // this would be to analyze the next events in the trace looking for context
+ // switches or trace disablement events, but for now, as we only want an
+ // approximation, we keep it simple. We are also guaranteed that the time in
+    // nanos of the next range is different from the current one, just because of
+ // the definition of a NanosecondsRange.
+ return interpolate(
+ std::min(tsc_conversion.ToNanos(tsc + items_count), next_range->nanos));
+ }
+
+ // In this case, each item took less than 1 TSC, so some parallelism was
+ // achieved, which is an indication that we didn't suffered of any kind of
+ // interruption.
+ return interpolate(next_range->nanos);
}
-lldb::addr_t DecodedThread::GetInstructionLoadAddress(size_t item_index) const {
+uint64_t DecodedThread::GetItemsCount() const { return m_item_kinds.size(); }
+
+lldb::addr_t
+DecodedThread::GetInstructionLoadAddress(uint64_t item_index) const {
return m_item_data[item_index].load_address;
}
@@ -58,33 +112,69 @@ DecodedThread::TraceItemStorage &
DecodedThread::CreateNewTraceItem(lldb::TraceItemKind kind) {
m_item_kinds.push_back(kind);
m_item_data.emplace_back();
+ if (m_last_tsc)
+ (*m_last_tsc)->second.items_count++;
+ if (m_last_nanoseconds)
+ (*m_last_nanoseconds)->second.items_count++;
return m_item_data.back();
}
-void DecodedThread::NotifyTsc(uint64_t tsc) {
- if (!m_last_tsc || *m_last_tsc != tsc) {
- m_timestamps.emplace(m_item_kinds.size(), tsc);
- m_last_tsc = tsc;
+void DecodedThread::NotifyTsc(TSC tsc) {
+ if (m_last_tsc && (*m_last_tsc)->second.tsc == tsc)
+ return;
+
+ m_last_tsc =
+ m_tscs.emplace(GetItemsCount(), TSCRange{tsc, 0, GetItemsCount()}).first;
+
+ if (m_tsc_conversion) {
+ uint64_t nanos = m_tsc_conversion->ToNanos(tsc);
+ if (!m_last_nanoseconds || (*m_last_nanoseconds)->second.nanos != nanos) {
+ m_last_nanoseconds =
+ m_nanoseconds
+ .emplace(GetItemsCount(), NanosecondsRange{nanos, tsc, nullptr, 0,
+ GetItemsCount()})
+ .first;
+ if (*m_last_nanoseconds != m_nanoseconds.begin()) {
+ auto prev_range = prev(*m_last_nanoseconds);
+ prev_range->second.next_range = &(*m_last_nanoseconds)->second;
+ }
+ }
}
+ AppendEvent(lldb::eTraceEventHWClockTick);
}
void DecodedThread::NotifyCPU(lldb::cpu_id_t cpu_id) {
if (!m_last_cpu || *m_last_cpu != cpu_id) {
- m_cpus.emplace(m_item_kinds.size(), cpu_id);
+ m_cpus.emplace(GetItemsCount(), cpu_id);
m_last_cpu = cpu_id;
AppendEvent(lldb::eTraceEventCPUChanged);
}
}
Optional<lldb::cpu_id_t>
-DecodedThread::GetCPUByIndex(uint64_t insn_index) const {
- // Could possibly optimize the search
- auto it = m_cpus.upper_bound(insn_index);
+DecodedThread::GetCPUByIndex(uint64_t item_index) const {
+ auto it = m_cpus.upper_bound(item_index);
if (it == m_cpus.begin())
return None;
return prev(it)->second;
}
+Optional<DecodedThread::TSCRange>
+DecodedThread::GetTSCRangeByIndex(uint64_t item_index) const {
+ auto next_it = m_tscs.upper_bound(item_index);
+ if (next_it == m_tscs.begin())
+ return None;
+ return prev(next_it)->second;
+}
+
+Optional<DecodedThread::NanosecondsRange>
+DecodedThread::GetNanosecondsRangeByIndex(uint64_t item_index) {
+ auto next_it = m_nanoseconds.upper_bound(item_index);
+ if (next_it == m_nanoseconds.begin())
+ return None;
+ return prev(next_it)->second;
+}
+
void DecodedThread::AppendEvent(lldb::TraceEvent event) {
CreateNewTraceItem(lldb::eTraceItemKindEvent).event = event;
m_events_stats.RecordEvent(event);
@@ -134,90 +224,24 @@ void DecodedThread::EventsStats::RecordEvent(lldb::TraceEvent event) {
total_count++;
}
-Optional<DecodedThread::TscRange> DecodedThread::CalculateTscRange(
- size_t insn_index,
- const Optional<DecodedThread::TscRange> &hint_range) const {
- // We first try to check the given hint range in case we are traversing the
- // trace in short jumps. If that fails, then we do the more expensive
- // arbitrary lookup.
- if (hint_range) {
- Optional<TscRange> candidate_range;
- if (insn_index < hint_range->GetStartInstructionIndex())
- candidate_range = hint_range->Prev();
- else if (insn_index > hint_range->GetEndInstructionIndex())
- candidate_range = hint_range->Next();
- else
- candidate_range = hint_range;
-
- if (candidate_range && candidate_range->InRange(insn_index))
- return candidate_range;
- }
- // Now we do a more expensive lookup
- auto it = m_timestamps.upper_bound(insn_index);
- if (it == m_timestamps.begin())
- return None;
-
- return TscRange(--it, *this);
-}
-
-lldb::TraceItemKind DecodedThread::GetItemKindByIndex(size_t item_index) const {
+lldb::TraceItemKind
+DecodedThread::GetItemKindByIndex(uint64_t item_index) const {
return static_cast<lldb::TraceItemKind>(m_item_kinds[item_index]);
}
-const char *DecodedThread::GetErrorByIndex(size_t item_index) const {
+const char *DecodedThread::GetErrorByIndex(uint64_t item_index) const {
return m_item_data[item_index].error;
}
-DecodedThread::DecodedThread(ThreadSP thread_sp) : m_thread_sp(thread_sp) {}
-
-lldb::TraceCursorUP DecodedThread::CreateNewCursor() {
- return std::make_unique<TraceCursorIntelPT>(m_thread_sp, shared_from_this());
-}
+DecodedThread::DecodedThread(
+ ThreadSP thread_sp,
+ const llvm::Optional<LinuxPerfZeroTscConversion> &tsc_conversion)
+ : m_thread_sp(thread_sp), m_tsc_conversion(tsc_conversion) {}
size_t DecodedThread::CalculateApproximateMemoryUsage() const {
return sizeof(TraceItemStorage) * m_item_data.size() +
sizeof(uint8_t) * m_item_kinds.size() +
- (sizeof(size_t) + sizeof(uint64_t)) * m_timestamps.size() +
- (sizeof(size_t) + sizeof(lldb::cpu_id_t)) * m_cpus.size();
-}
-
-DecodedThread::TscRange::TscRange(std::map<size_t, uint64_t>::const_iterator it,
- const DecodedThread &decoded_thread)
- : m_it(it), m_decoded_thread(&decoded_thread) {
- auto next_it = m_it;
- ++next_it;
- m_end_index = (next_it == m_decoded_thread->m_timestamps.end())
- ? std::numeric_limits<uint64_t>::max()
- : next_it->first - 1;
-}
-
-size_t DecodedThread::TscRange::GetTsc() const { return m_it->second; }
-
-size_t DecodedThread::TscRange::GetStartInstructionIndex() const {
- return m_it->first;
-}
-
-size_t DecodedThread::TscRange::GetEndInstructionIndex() const {
- return m_end_index;
-}
-
-bool DecodedThread::TscRange::InRange(size_t insn_index) const {
- return GetStartInstructionIndex() <= insn_index &&
- insn_index <= GetEndInstructionIndex();
-}
-
-Optional<DecodedThread::TscRange> DecodedThread::TscRange::Next() const {
- auto next_it = m_it;
- ++next_it;
- if (next_it == m_decoded_thread->m_timestamps.end())
- return None;
- return TscRange(next_it, *m_decoded_thread);
-}
-
-Optional<DecodedThread::TscRange> DecodedThread::TscRange::Prev() const {
- if (m_it == m_decoded_thread->m_timestamps.begin())
- return None;
- auto prev_it = m_it;
- --prev_it;
- return TscRange(prev_it, *m_decoded_thread);
+ (sizeof(uint64_t) + sizeof(TSC)) * m_tscs.size() +
+ (sizeof(uint64_t) + sizeof(uint64_t)) * m_nanoseconds.size() +
+ (sizeof(uint64_t) + sizeof(lldb::cpu_id_t)) * m_cpus.size();
}
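The NanosecondsRange::GetInterpolatedTime change above gives every trace item inside a range a distinct estimated timestamp between that range's wall-clock sample and the next one. A simplified sketch of the interpolation idea with hypothetical numbers (not the exact lldb arithmetic, which also handles extrapolation for the last range and context-switch gaps):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical trace timing: 4 items share the sample at 1000 ns, and the
  // next sample arrives at 1400 ns.
  uint64_t begin_of_time_ns = 500; // lowest timestamp in the whole trace
  uint64_t range_start_ns = 1000;
  uint64_t next_range_start_ns = 1400;
  uint64_t first_item_index = 10;
  uint64_t items_count = 4;

  double ns_per_item =
      static_cast<double>(next_range_start_ns - range_start_ns) / items_count;

  for (uint64_t item = first_item_index; item < first_item_index + items_count;
       ++item) {
    // Offset from the beginning of the trace plus an even share of the gap.
    double interpolated = (range_start_ns - begin_of_time_ns) +
                          (item - first_item_index) * ns_per_item;
    printf("item %llu -> %.1f ns since start of trace\n",
           (unsigned long long)item, interpolated);
  }
  return 0;
}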
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.h
index bd1a90aaf250..9376a0af169d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/DecodedThread.h
@@ -71,45 +71,7 @@ private:
/// stopped at. See \a Trace::GetCursorPosition for more information.
class DecodedThread : public std::enable_shared_from_this<DecodedThread> {
public:
- /// \class TscRange
- /// Class that represents the trace range associated with a given TSC.
- /// It provides efficient iteration to the previous or next TSC range in the
- /// decoded trace.
- ///
- /// TSC timestamps are emitted by the decoder infrequently, which means
- /// that each TSC covers a range of instruction indices, which can be used to
- /// speed up TSC lookups.
- class TscRange {
- public:
- /// Check if this TSC range includes the given instruction index.
- bool InRange(size_t insn_index) const;
-
- /// Get the next range chronologically.
- llvm::Optional<TscRange> Next() const;
-
- /// Get the previous range chronologically.
- llvm::Optional<TscRange> Prev() const;
-
- /// Get the TSC value.
- size_t GetTsc() const;
- /// Get the smallest instruction index that has this TSC.
- size_t GetStartInstructionIndex() const;
- /// Get the largest instruction index that has this TSC.
- size_t GetEndInstructionIndex() const;
-
- private:
- friend class DecodedThread;
-
- TscRange(std::map<size_t, uint64_t>::const_iterator it,
- const DecodedThread &decoded_thread);
-
- /// The iterator pointing to the beginning of the range.
- std::map<size_t, uint64_t>::const_iterator m_it;
- /// The largest instruction index that has this TSC.
- size_t m_end_index;
-
- const DecodedThread *m_decoded_thread;
- };
+ using TSC = uint64_t;
// Struct holding counts for libipts errors;
struct LibiptErrorsStats {
@@ -120,6 +82,61 @@ public:
void RecordError(int libipt_error_code);
};
+ /// A structure that represents a maximal range of trace items associated to
+ /// the same TSC value.
+ struct TSCRange {
+ TSC tsc;
+ /// Number of trace items in this range.
+ uint64_t items_count;
+ /// Index of the first trace item in this range.
+ uint64_t first_item_index;
+
+ /// \return
+ /// \b true if and only if the given \p item_index is covered by this
+ /// range.
+ bool InRange(uint64_t item_index) const;
+ };
+
+ /// A structure that represents a maximal range of trace items associated to
+ /// the same non-interpolated timestamps in nanoseconds.
+ struct NanosecondsRange {
+ /// The nanoseconds value for this range.
+ uint64_t nanos;
+ /// The corresponding TSC value for this range.
+ TSC tsc;
+ /// A nullable pointer to the next range.
+ NanosecondsRange *next_range;
+ /// Number of trace items in this range.
+ uint64_t items_count;
+ /// Index of the first trace item in this range.
+ uint64_t first_item_index;
+
+ /// Calculate an interpolated timestamp in nanoseconds for the given item
+ /// index. It's guaranteed that two different item indices will produce
+ /// different interpolated values.
+ ///
+ /// \param[in] item_index
+ /// The index of the item whose timestamp will be estimated. It has to be
+ /// part of this range.
+ ///
+ /// \param[in] beginning_of_time_nanos
+ /// The timestamp at which tracing started.
+ ///
+ /// \param[in] tsc_conversion
+ /// The tsc -> nanos conversion utility
+ ///
+ /// \return
+ /// An interpolated timestamp value for the given trace item.
+ double
+ GetInterpolatedTime(uint64_t item_index, uint64_t beginning_of_time_nanos,
+ const LinuxPerfZeroTscConversion &tsc_conversion) const;
+
+ /// \return
+ /// \b true if and only if the given \p item_index is covered by this
+ /// range.
+ bool InRange(uint64_t item_index) const;
+ };
+
// Struct holding counts for events;
struct EventsStats {
/// A count for each individual event kind. We use an unordered map instead
@@ -130,39 +147,21 @@ public:
void RecordEvent(lldb::TraceEvent event);
};
- DecodedThread(lldb::ThreadSP thread_sp);
-
- /// Utility constructor that initializes the trace with a provided error.
- DecodedThread(lldb::ThreadSP thread_sp, llvm::Error &&err);
+ DecodedThread(
+ lldb::ThreadSP thread_sp,
+ const llvm::Optional<LinuxPerfZeroTscConversion> &tsc_conversion);
/// Get the total number of instruction, errors and events from the decoded
/// trace.
- int64_t GetItemsCount() const;
-
- /// Construct the TSC range that covers the given instruction index.
- /// This operation is O(logn) and should be used sparingly.
- /// If the trace was collected with TSC support, all the instructions of
- /// the trace will have associated TSCs. This means that this method will
- /// only return \b llvm::None if there are no TSCs whatsoever in the trace.
- ///
- /// \param[in] insn_index
- /// The instruction index in question.
- ///
- /// \param[in] hint_range
- /// An optional range that might include the given index or might be a
- /// neighbor of it. It might help speed it traversals of the trace with
- /// short jumps.
- llvm::Optional<TscRange> CalculateTscRange(
- size_t insn_index,
- const llvm::Optional<DecodedThread::TscRange> &hint_range) const;
+ uint64_t GetItemsCount() const;
/// \return
/// The error associated with a given trace item.
- const char *GetErrorByIndex(size_t item_index) const;
+ const char *GetErrorByIndex(uint64_t item_index) const;
/// \return
/// The trace item kind given an item index.
- lldb::TraceItemKind GetItemKindByIndex(size_t item_index) const;
+ lldb::TraceItemKind GetItemKindByIndex(uint64_t item_index) const;
/// \return
/// The underlying event type for the given trace item index.
@@ -177,12 +176,31 @@ public:
/// The requested cpu id, or \a llvm::None if not available.
llvm::Optional<lldb::cpu_id_t> GetCPUByIndex(uint64_t item_index) const;
+ /// Get a maximal range of trace items that include the given \p item_index
+ /// that have the same TSC value.
+ ///
+ /// \param[in] item_index
+ /// The trace item index to compare with.
+ ///
+ /// \return
+ /// The requested TSC range, or \a llvm::None if not available.
+ llvm::Optional<DecodedThread::TSCRange>
+ GetTSCRangeByIndex(uint64_t item_index) const;
+
+ /// Get a maximal range of trace items that include the given \p item_index
+ /// that have the same nanoseconds timestamp without interpolation.
+ ///
+ /// \param[in] item_index
+ /// The trace item index to compare with.
+ ///
+ /// \return
+ /// The requested nanoseconds range, or \a llvm::None if not available.
+ llvm::Optional<DecodedThread::NanosecondsRange>
+ GetNanosecondsRangeByIndex(uint64_t item_index);
+
/// \return
/// The load address of the instruction at the given index.
- lldb::addr_t GetInstructionLoadAddress(size_t item_index) const;
-
- /// Get a new cursor for the decoded thread.
- lldb::TraceCursorUP CreateNewCursor();
+ lldb::addr_t GetInstructionLoadAddress(uint64_t item_index) const;
/// Return an object with statistics of the TSC decoding errors that happened.
/// A TSC error is not a fatal error and doesn't create gaps in the trace.
@@ -214,7 +232,7 @@ public:
/// Notify this object that a new tsc has been seen.
/// If this a new TSC, an event will be created.
- void NotifyTsc(uint64_t tsc);
+ void NotifyTsc(TSC tsc);
/// Notify this object that a CPU has been seen.
/// If this a new CPU, an event will be created.
@@ -262,15 +280,22 @@ private:
/// it in TraceItemStorage to avoid padding.
std::vector<uint8_t> m_item_kinds;
- /// This map contains the TSCs of the decoded instructions. It maps
- /// `instruction index -> TSC`, where `instruction index` is the first index
- /// at which the mapped TSC appears. We use this representation because TSCs
- /// are sporadic and we can think of them as ranges. If TSCs are present in
- /// the trace, all instructions will have an associated TSC, including the
- /// first one. Otherwise, this map will be empty.
- std::map<uint64_t, uint64_t> m_timestamps;
+ /// This map contains the TSCs of the decoded trace items. It maps
+ /// `item index -> TSC`, where `item index` is the first index
+ /// at which the mapped TSC first appears. We use this representation because
+ /// TSCs are sporadic and we can think of them as ranges.
+ std::map<uint64_t, TSCRange> m_tscs;
/// This is the chronologically last TSC that has been added.
- llvm::Optional<uint64_t> m_last_tsc = llvm::None;
+ llvm::Optional<std::map<uint64_t, TSCRange>::iterator> m_last_tsc =
+ llvm::None;
+ /// This map contains the non-interpolated nanoseconds timestamps of the
+ /// decoded trace items. It maps `item index -> nanoseconds`, where `item
+ /// index` is the first index at which the mapped nanoseconds first appears.
+ /// We use this representation because timestamps are sporadic and we think of
+ /// them as ranges.
+ std::map<uint64_t, NanosecondsRange> m_nanoseconds;
+ llvm::Optional<std::map<uint64_t, NanosecondsRange>::iterator>
+ m_last_nanoseconds = llvm::None;
// The cpu information is stored as a map. It maps `instruction index -> CPU`
// A CPU is associated with the next instructions that follow until the next
@@ -279,6 +304,9 @@ private:
/// This is the chronologically last CPU ID.
llvm::Optional<uint64_t> m_last_cpu = llvm::None;
+ /// TSC -> nanos conversion utility.
+ llvm::Optional<LinuxPerfZeroTscConversion> m_tsc_conversion;
+
/// Statistics of all tracing events.
EventsStats m_events_stats;
/// Statistics of libipt errors when decoding TSCs.
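The maps above are sparse: only the first item index of each TSC, nanoseconds, or CPU range is stored, and GetTSCRangeByIndex, GetNanosecondsRangeByIndex, and GetCPUByIndex all recover the covering entry with upper_bound followed by one step back. A minimal sketch of that lookup pattern on a plain std::map with made-up contents:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

// Sparse map: the key is the first item index at which the mapped value
// applies; the value covers every later item until the next key.
int main() {
  std::map<uint64_t, uint32_t> cpu_by_first_item = {{0, 2}, {50, 7}, {120, 2}};

  auto lookup = [&](uint64_t item_index) -> const uint32_t * {
    auto it = cpu_by_first_item.upper_bound(item_index);
    if (it == cpu_by_first_item.begin())
      return nullptr; // no entry at or before item_index
    return &std::prev(it)->second;
  };

  for (uint64_t idx : {0ull, 49ull, 50ull, 200ull}) {
    if (const uint32_t *cpu = lookup(idx))
      printf("item %llu ran on cpu %u\n", (unsigned long long)idx,
             (unsigned)*cpu);
  }
  return 0;
}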
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.cpp
index a98337a4e058..234b9f917d32 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.cpp
@@ -285,9 +285,23 @@ Error lldb_private::trace_intel_pt::DecodeSystemWideTraceForThread(
for (size_t i = 0; i < executions.size(); i++) {
const IntelPTThreadContinousExecution &execution = executions[i];
- decoded_thread.NotifyCPU(execution.thread_execution.cpu_id);
auto variant = execution.thread_execution.variant;
+ // We report the TSCs we are sure of
+ switch (variant) {
+ case ThreadContinuousExecution::Variant::Complete:
+ decoded_thread.NotifyTsc(execution.thread_execution.tscs.complete.start);
+ break;
+ case ThreadContinuousExecution::Variant::OnlyStart:
+ decoded_thread.NotifyTsc(
+ execution.thread_execution.tscs.only_start.start);
+ break;
+ default:
+ break;
+ }
+
+ decoded_thread.NotifyCPU(execution.thread_execution.cpu_id);
+
// If we haven't seen a PSB yet, then it's fine not to show errors
if (has_seen_psbs) {
if (execution.intelpt_subtraces.empty()) {
@@ -299,12 +313,12 @@ Error lldb_private::trace_intel_pt::DecodeSystemWideTraceForThread(
}
// If the first execution is incomplete because it doesn't have a previous
- // context switch in its cpu, all good.
+ // context switch in its cpu, all good, otherwise we report the error.
if (variant == ThreadContinuousExecution::Variant::OnlyEnd ||
variant == ThreadContinuousExecution::Variant::HintedStart) {
decoded_thread.AppendCustomError(
- formatv("Thread execution starting on cpu id = {0} doesn't "
- "have a matching context switch in.",
+ formatv("Unable to find the context switch in for the thread "
+ "execution starting on cpu id = {0}",
execution.thread_execution.cpu_id)
.str());
}
@@ -318,6 +332,18 @@ Error lldb_private::trace_intel_pt::DecodeSystemWideTraceForThread(
decoder.DecodePSB(intel_pt_execution.psb_offset);
}
+ // We report the TSCs we are sure of
+ switch (variant) {
+ case ThreadContinuousExecution::Variant::Complete:
+ decoded_thread.NotifyTsc(execution.thread_execution.tscs.complete.end);
+ break;
+ case ThreadContinuousExecution::Variant::OnlyEnd:
+ decoded_thread.NotifyTsc(execution.thread_execution.tscs.only_end.end);
+ break;
+ default:
+ break;
+ }
+
// If we haven't seen a PSB yet, then it's fine not to show errors
if (has_seen_psbs) {
// If the last execution is incomplete because it doesn't have a following
@@ -326,8 +352,8 @@ Error lldb_private::trace_intel_pt::DecodeSystemWideTraceForThread(
i + 1 != executions.size()) ||
variant == ThreadContinuousExecution::Variant::HintedEnd) {
decoded_thread.AppendCustomError(
- formatv("Thread execution on cpu id = {0} doesn't have a "
- "matching context switch out",
+ formatv("Unable to find the context switch out for the thread "
+ "execution on cpu id = {0}",
execution.thread_execution.cpu_id)
.str());
}
@@ -380,3 +406,22 @@ lldb_private::trace_intel_pt::SplitTraceInContinuousExecutions(
}
return executions;
}
+
+Expected<Optional<uint64_t>>
+lldb_private::trace_intel_pt::FindLowestTSCInTrace(TraceIntelPT &trace_intel_pt,
+ ArrayRef<uint8_t> buffer) {
+ Expected<PtInsnDecoderUP> decoder_up =
+ CreateInstructionDecoder(trace_intel_pt, buffer);
+ if (!decoder_up)
+ return decoder_up.takeError();
+
+ pt_insn_decoder *decoder = decoder_up.get().get();
+ int status = pte_ok;
+ if (IsLibiptError(status = pt_insn_sync_forward(decoder)))
+ return None;
+
+ uint64_t tsc;
+ if (IsLibiptError(pt_insn_time(decoder, &tsc, nullptr, nullptr)))
+ return None;
+ return tsc;
+}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.h
index cad4d39fcf24..66f3798cd600 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/LibiptDecoder.h
@@ -94,6 +94,16 @@ llvm::Expected<std::vector<IntelPTThreadSubtrace>>
SplitTraceInContinuousExecutions(TraceIntelPT &trace_intel_pt,
llvm::ArrayRef<uint8_t> buffer);
+/// Find the lowest TSC in the given trace.
+///
+/// \return
+/// The lowest TSC value in this trace if available, \a llvm::None if the
+/// trace is empty or the trace contains no timing information, or an \a
+/// llvm::Error if it was not possible to set up the decoder.
+llvm::Expected<llvm::Optional<uint64_t>>
+FindLowestTSCInTrace(TraceIntelPT &trace_intel_pt,
+ llvm::ArrayRef<uint8_t> buffer);
+
} // namespace trace_intel_pt
} // namespace lldb_private
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.cpp
index d3ac61f7e658..920992d9d636 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.cpp
@@ -23,6 +23,21 @@ using namespace llvm;
ThreadDecoder::ThreadDecoder(const ThreadSP &thread_sp, TraceIntelPT &trace)
: m_thread_sp(thread_sp), m_trace(trace) {}
+Expected<Optional<uint64_t>> ThreadDecoder::FindLowestTSC() {
+ Optional<uint64_t> lowest_tsc;
+ Error err = m_trace.OnThreadBufferRead(
+ m_thread_sp->GetID(), [&](llvm::ArrayRef<uint8_t> data) -> llvm::Error {
+ Expected<Optional<uint64_t>> tsc = FindLowestTSCInTrace(m_trace, data);
+ if (!tsc)
+ return tsc.takeError();
+ lowest_tsc = *tsc;
+ return Error::success();
+ });
+ if (err)
+ return std::move(err);
+ return lowest_tsc;
+}
+
Expected<DecodedThreadSP> ThreadDecoder::Decode() {
if (!m_decoded_thread.hasValue()) {
if (Expected<DecodedThreadSP> decoded_thread = DoDecode()) {
@@ -38,8 +53,8 @@ llvm::Expected<DecodedThreadSP> ThreadDecoder::DoDecode() {
return m_trace.GetThreadTimer(m_thread_sp->GetID())
.TimeTask(
"Decoding instructions", [&]() -> Expected<DecodedThreadSP> {
- DecodedThreadSP decoded_thread_sp =
- std::make_shared<DecodedThread>(m_thread_sp);
+ DecodedThreadSP decoded_thread_sp = std::make_shared<DecodedThread>(
+ m_thread_sp, m_trace.GetPerfZeroTscConversion());
Error err = m_trace.OnThreadBufferRead(
m_thread_sp->GetID(), [&](llvm::ArrayRef<uint8_t> data) {
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.h
index 5c77ad93d27a..d580bc4dd335 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/ThreadDecoder.h
@@ -36,6 +36,12 @@ public:
/// A \a DecodedThread instance.
llvm::Expected<DecodedThreadSP> Decode();
+ /// \return
+ /// The lowest TSC value in this trace if available, \a llvm::None if the
+ /// trace is empty or the trace contains no timing information, or an \a
+ /// llvm::Error if it was not possible to set up the decoder.
+ llvm::Expected<llvm::Optional<uint64_t>> FindLowestTSC();
+
ThreadDecoder(const ThreadDecoder &other) = delete;
ThreadDecoder &operator=(const ThreadDecoder &other) = delete;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.cpp
index 185c02b6bcd9..a4d86fb48ebe 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.cpp
@@ -17,28 +17,55 @@ using namespace lldb_private;
using namespace lldb_private::trace_intel_pt;
using namespace llvm;
-TraceCursorIntelPT::TraceCursorIntelPT(ThreadSP thread_sp,
- DecodedThreadSP decoded_thread_sp)
- : TraceCursor(thread_sp), m_decoded_thread_sp(decoded_thread_sp) {
+TraceCursorIntelPT::TraceCursorIntelPT(
+ ThreadSP thread_sp, DecodedThreadSP decoded_thread_sp,
+ const Optional<LinuxPerfZeroTscConversion> &tsc_conversion,
+ Optional<uint64_t> beginning_of_time_nanos)
+ : TraceCursor(thread_sp), m_decoded_thread_sp(decoded_thread_sp),
+ m_tsc_conversion(tsc_conversion),
+ m_beginning_of_time_nanos(beginning_of_time_nanos) {
Seek(0, SeekType::End);
}
-void TraceCursorIntelPT::CalculateTscRange() {
- // If we failed, then we look for the exact range
- if (!m_tsc_range || !m_tsc_range->InRange(m_pos))
- m_tsc_range = m_decoded_thread_sp->CalculateTscRange(
- m_pos, /*hit_range=*/m_tsc_range);
-}
-
void TraceCursorIntelPT::Next() {
m_pos += IsForwards() ? 1 : -1;
+ ClearTimingRangesIfInvalid();
+}
+
+void TraceCursorIntelPT::ClearTimingRangesIfInvalid() {
+ if (m_tsc_range_calculated) {
+ if (!m_tsc_range || m_pos < 0 || !m_tsc_range->InRange(m_pos)) {
+ m_tsc_range = None;
+ m_tsc_range_calculated = false;
+ }
+ }
+
+ if (m_nanoseconds_range_calculated) {
+ if (!m_nanoseconds_range || m_pos < 0 ||
+ !m_nanoseconds_range->InRange(m_pos)) {
+ m_nanoseconds_range = None;
+ m_nanoseconds_range_calculated = false;
+ }
+ }
+}
- // We try to go to a neighbor tsc range that might contain the current pos
- if (m_tsc_range && !m_tsc_range->InRange(m_pos))
- m_tsc_range = IsForwards() ? m_tsc_range->Next() : m_tsc_range->Prev();
+const Optional<DecodedThread::TSCRange> &
+TraceCursorIntelPT::GetTSCRange() const {
+ if (!m_tsc_range_calculated) {
+ m_tsc_range_calculated = true;
+ m_tsc_range = m_decoded_thread_sp->GetTSCRangeByIndex(m_pos);
+ }
+ return m_tsc_range;
+}
- // If we failed, this call will fix it
- CalculateTscRange();
+const Optional<DecodedThread::NanosecondsRange> &
+TraceCursorIntelPT::GetNanosecondsRange() const {
+ if (!m_nanoseconds_range_calculated) {
+ m_nanoseconds_range_calculated = true;
+ m_nanoseconds_range =
+ m_decoded_thread_sp->GetNanosecondsRangeByIndex(m_pos);
+ }
+ return m_nanoseconds_range;
}
bool TraceCursorIntelPT::Seek(int64_t offset, SeekType origin) {
@@ -52,13 +79,15 @@ bool TraceCursorIntelPT::Seek(int64_t offset, SeekType origin) {
case TraceCursor::SeekType::Current:
m_pos += offset;
}
- CalculateTscRange();
+
+ ClearTimingRangesIfInvalid();
return HasValue();
}
bool TraceCursorIntelPT::HasValue() const {
- return m_pos >= 0 && m_pos < m_decoded_thread_sp->GetItemsCount();
+ return m_pos >= 0 &&
+ static_cast<uint64_t>(m_pos) < m_decoded_thread_sp->GetItemsCount();
}
lldb::TraceItemKind TraceCursorIntelPT::GetItemKind() const {
@@ -73,15 +102,18 @@ lldb::addr_t TraceCursorIntelPT::GetLoadAddress() const {
return m_decoded_thread_sp->GetInstructionLoadAddress(m_pos);
}
-Optional<uint64_t>
-TraceCursorIntelPT::GetCounter(lldb::TraceCounter counter_type) const {
- switch (counter_type) {
- case lldb::eTraceCounterTSC:
- if (m_tsc_range)
- return m_tsc_range->GetTsc();
- else
- return llvm::None;
- }
+Optional<uint64_t> TraceCursorIntelPT::GetHWClock() const {
+ if (const Optional<DecodedThread::TSCRange> &range = GetTSCRange())
+ return range->tsc;
+ return None;
+}
+
+Optional<double> TraceCursorIntelPT::GetWallClockTime() const {
+ if (const Optional<DecodedThread::NanosecondsRange> &range =
+ GetNanosecondsRange())
+ return range->GetInterpolatedTime(m_pos, *m_beginning_of_time_nanos,
+ *m_tsc_conversion);
+ return None;
}
Optional<lldb::cpu_id_t> TraceCursorIntelPT::GetCPU() const {
@@ -96,13 +128,12 @@ bool TraceCursorIntelPT::GoToId(user_id_t id) {
if (!HasId(id))
return false;
m_pos = id;
- m_tsc_range = m_decoded_thread_sp->CalculateTscRange(m_pos, m_tsc_range);
-
+ ClearTimingRangesIfInvalid();
return true;
}
bool TraceCursorIntelPT::HasId(lldb::user_id_t id) const {
- return static_cast<int64_t>(id) < m_decoded_thread_sp->GetItemsCount();
+ return id < m_decoded_thread_sp->GetItemsCount();
}
user_id_t TraceCursorIntelPT::GetId() const { return m_pos; }
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.h
index 2e0f67e67dfc..3cd9ab831f5e 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceCursorIntelPT.h
@@ -16,8 +16,10 @@ namespace trace_intel_pt {
class TraceCursorIntelPT : public TraceCursor {
public:
- TraceCursorIntelPT(lldb::ThreadSP thread_sp,
- DecodedThreadSP decoded_thread_sp);
+ TraceCursorIntelPT(
+ lldb::ThreadSP thread_sp, DecodedThreadSP decoded_thread_sp,
+ const llvm::Optional<LinuxPerfZeroTscConversion> &tsc_conversion,
+ llvm::Optional<uint64_t> beginning_of_time_nanos);
bool Seek(int64_t offset, SeekType origin) override;
@@ -29,13 +31,12 @@ public:
lldb::addr_t GetLoadAddress() const override;
- llvm::Optional<uint64_t>
- GetCounter(lldb::TraceCounter counter_type) const override;
-
lldb::TraceEvent GetEventType() const override;
llvm::Optional<lldb::cpu_id_t> GetCPU() const override;
+ llvm::Optional<uint64_t> GetHWClock() const override;
+
lldb::TraceItemKind GetItemKind() const override;
bool GoToId(lldb::user_id_t id) override;
@@ -44,16 +45,43 @@ public:
bool HasId(lldb::user_id_t id) const override;
+ llvm::Optional<double> GetWallClockTime() const override;
+
private:
- /// Calculate the tsc range for the current position if needed.
- void CalculateTscRange();
+ /// Clear the current TSC and nanoseconds ranges if after moving they are not
+ /// valid anymore.
+ void ClearTimingRangesIfInvalid();
+
+ /// Get or calculate the TSC range that includes the current trace item.
+ const llvm::Optional<DecodedThread::TSCRange> &GetTSCRange() const;
+
+  /// Get or calculate the nanoseconds range that includes the current trace
+  /// item.
+ const llvm::Optional<DecodedThread::NanosecondsRange> &
+ GetNanosecondsRange() const;
/// Storage of the actual instructions
DecodedThreadSP m_decoded_thread_sp;
/// Internal instruction index currently pointing at.
int64_t m_pos;
- /// Tsc range covering the current instruction.
- llvm::Optional<DecodedThread::TscRange> m_tsc_range;
+
+ /// Timing information and cached values.
+ /// \{
+
+ /// TSC -> nanos conversion utility. \a None if not available at all.
+ llvm::Optional<LinuxPerfZeroTscConversion> m_tsc_conversion;
+ /// Lowest nanoseconds timestamp seen in any thread trace, \a None if not
+ /// available at all.
+ llvm::Optional<uint64_t> m_beginning_of_time_nanos;
+ /// Range of trace items with the same TSC that includes the current trace
+ /// item, \a None if not calculated or not available.
+ llvm::Optional<DecodedThread::TSCRange> mutable m_tsc_range;
+ bool mutable m_tsc_range_calculated = false;
+ /// Range of trace items with the same non-interpolated timestamps in
+ /// nanoseconds that includes the current trace item, \a None if not
+ /// calculated or not available.
+ llvm::Optional<DecodedThread::NanosecondsRange> mutable m_nanoseconds_range;
+ bool mutable m_nanoseconds_range_calculated = false;
+ /// \}
};
} // namespace trace_intel_pt
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
index 57433ffb14cb..f3f0a513e3fa 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
@@ -8,6 +8,8 @@
#include "TraceIntelPT.h"
+#include "TraceCursorIntelPT.h"
+
#include "../common/ThreadPostMortemTrace.h"
#include "CommandObjectTraceStartIntelPT.h"
#include "DecodedThread.h"
@@ -138,11 +140,53 @@ Expected<DecodedThreadSP> TraceIntelPT::Decode(Thread &thread) {
return it->second->Decode();
}
+Expected<Optional<uint64_t>> TraceIntelPT::FindBeginningOfTimeNanos() {
+ Storage &storage = GetUpdatedStorage();
+ if (storage.beginning_of_time_nanos_calculated)
+ return storage.beginning_of_time_nanos;
+ storage.beginning_of_time_nanos_calculated = true;
+
+ if (!storage.tsc_conversion)
+ return None;
+
+ Optional<uint64_t> lowest_tsc;
+
+ if (storage.multicpu_decoder) {
+ if (Expected<Optional<uint64_t>> tsc =
+ storage.multicpu_decoder->FindLowestTSC()) {
+ lowest_tsc = *tsc;
+ } else {
+ return tsc.takeError();
+ }
+ }
+
+ for (auto &decoder : storage.thread_decoders) {
+ Expected<Optional<uint64_t>> tsc = decoder.second->FindLowestTSC();
+ if (!tsc)
+ return tsc.takeError();
+
+ if (*tsc && (!lowest_tsc || *lowest_tsc > **tsc))
+ lowest_tsc = **tsc;
+ }
+
+ if (lowest_tsc) {
+ storage.beginning_of_time_nanos =
+ storage.tsc_conversion->ToNanos(*lowest_tsc);
+ }
+ return storage.beginning_of_time_nanos;
+}
+
llvm::Expected<lldb::TraceCursorUP>
TraceIntelPT::CreateNewCursor(Thread &thread) {
- if (Expected<DecodedThreadSP> decoded_thread = Decode(thread))
- return decoded_thread.get()->CreateNewCursor();
- else
+ if (Expected<DecodedThreadSP> decoded_thread = Decode(thread)) {
+ if (Expected<Optional<uint64_t>> beginning_of_time =
+ FindBeginningOfTimeNanos())
+ return std::make_unique<TraceCursorIntelPT>(
+ thread.shared_from_this(), *decoded_thread, m_storage.tsc_conversion,
+ *beginning_of_time);
+ else
+ return beginning_of_time.takeError();
+ } else
return decoded_thread.takeError();
}
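FindBeginningOfTimeNanos above computes its answer once, caches it, and takes the minimum TSC reported by the multi-cpu decoder and each per-thread decoder, ignoring decoders that saw no timing data. A small sketch of that min-over-optionals step, using std::optional in place of llvm::Optional and hypothetical TSC values:

#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

int main() {
  // One entry per decoder; nullopt means that trace had no timing info.
  std::vector<std::optional<uint64_t>> per_decoder_lowest_tsc = {
      std::nullopt, 123456789, 123450000, std::nullopt};

  std::optional<uint64_t> lowest_tsc;
  for (const auto &tsc : per_decoder_lowest_tsc) {
    if (tsc && (!lowest_tsc || *lowest_tsc > *tsc))
      lowest_tsc = *tsc;
  }

  if (lowest_tsc) {
    // The real code then converts this TSC to nanoseconds with the
    // LinuxPerfZeroTscConversion utility and caches the result.
    printf("beginning of time (TSC): %llu\n", (unsigned long long)*lowest_tsc);
  } else {
    printf("no timing information in any trace\n");
  }
  return 0;
}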
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
index d3e58374867d..7f2c3f8dda5d 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
@@ -220,6 +220,13 @@ private:
/// returned if the decoder couldn't be properly set up.
llvm::Expected<DecodedThreadSP> Decode(Thread &thread);
+ /// \return
+ /// The lowest timestamp in nanoseconds in all traces if available, \a
+  ///     llvm::None if all the traces were empty or no trace contained
+ /// timing information, or an \a llvm::Error if it was not possible to set
+ /// up the decoder for some trace.
+ llvm::Expected<llvm::Optional<uint64_t>> FindBeginningOfTimeNanos();
+
// Dump out trace info in JSON format
void DumpTraceInfoAsJson(Thread &thread, Stream &s, bool verbose);
@@ -236,6 +243,8 @@ private:
/// It is provided by either a trace bundle or a live process to convert TSC
/// counters to and from nanos. It might not be available on all hosts.
llvm::Optional<LinuxPerfZeroTscConversion> tsc_conversion;
+ llvm::Optional<uint64_t> beginning_of_time_nanos;
+ bool beginning_of_time_nanos_calculated = false;
} m_storage;
/// It is provided by either a trace bundle or a live process' "cpuInfo"
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.cpp b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.cpp
index e547032f739d..08f54b582e3f 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.cpp
@@ -35,6 +35,28 @@ bool TraceIntelPTMultiCpuDecoder::TracesThread(lldb::tid_t tid) const {
return m_tids.count(tid);
}
+Expected<Optional<uint64_t>> TraceIntelPTMultiCpuDecoder::FindLowestTSC() {
+ Optional<uint64_t> lowest_tsc;
+ TraceIntelPTSP trace_sp = GetTrace();
+
+ Error err = GetTrace()->OnAllCpusBinaryDataRead(
+ IntelPTDataKinds::kIptTrace,
+ [&](const DenseMap<cpu_id_t, ArrayRef<uint8_t>> &buffers) -> Error {
+ for (auto &cpu_id_to_buffer : buffers) {
+ Expected<Optional<uint64_t>> tsc =
+ FindLowestTSCInTrace(*trace_sp, cpu_id_to_buffer.second);
+ if (!tsc)
+ return tsc.takeError();
+ if (*tsc && (!lowest_tsc || *lowest_tsc > **tsc))
+ lowest_tsc = **tsc;
+ }
+ return Error::success();
+ });
+ if (err)
+ return std::move(err);
+ return lowest_tsc;
+}
+
Expected<DecodedThreadSP> TraceIntelPTMultiCpuDecoder::Decode(Thread &thread) {
if (Error err = CorrelateContextSwitchesAndIntelPtTraces())
return std::move(err);
@@ -48,8 +70,8 @@ Expected<DecodedThreadSP> TraceIntelPTMultiCpuDecoder::Decode(Thread &thread) {
if (it != m_decoded_threads.end())
return it->second;
- DecodedThreadSP decoded_thread_sp =
- std::make_shared<DecodedThread>(thread.shared_from_this());
+ DecodedThreadSP decoded_thread_sp = std::make_shared<DecodedThread>(
+ thread.shared_from_this(), trace_sp->GetPerfZeroTscConversion());
Error err = trace_sp->OnAllCpusBinaryDataRead(
IntelPTDataKinds::kIptTrace,
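FindLowestTSC above reduces the per-CPU trace buffers to the smallest TSC seen, treating a missing timestamp as absence rather than zero and bubbling decoder failures up through Expected. A minimal standalone sketch of that accumulation step, using std::optional and leaving out the error plumbing:

#include <cstdint>
#include <optional>
#include <vector>

// Lowest timestamp across per-CPU results; nullopt if none of them had one.
static std::optional<uint64_t>
lowestTSC(const std::vector<std::optional<uint64_t>> &PerCpu) {
  std::optional<uint64_t> Lowest;
  for (const std::optional<uint64_t> &TSC : PerCpu)
    if (TSC && (!Lowest || *Lowest > *TSC))
      Lowest = *TSC;
  return Lowest;
}

int main() {
  auto L = lowestTSC({std::nullopt, 900, 450, std::nullopt}); // expect 450
  return (L && *L == 450) ? 0 : 1;
}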
diff --git a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.h b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.h
index 3b7926760f3c..87c370e88ae6 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTMultiCpuDecoder.h
@@ -66,6 +66,12 @@ public:
/// The total number of PSB blocks in all cores.
size_t GetTotalPSBBlocksCount() const;
+ /// \return
+ /// The lowest TSC value in this trace if available, \a llvm::None if the
+ /// trace is empty or the trace contains no timing information, or an \a
+ /// llvm::Error if it was not possible to set up the decoder.
+ llvm::Expected<llvm::Optional<uint64_t>> FindLowestTSC();
+
private:
/// Traverse the context switch traces and the basic intel pt continuous
/// subtraces and produce a list of continuous executions for each process and
diff --git a/contrib/llvm-project/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp b/contrib/llvm-project/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
index 92eec139e07c..c796cbc75c1b 100644
--- a/contrib/llvm-project/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/UnwindAssembly/x86/x86AssemblyInspectionEngine.cpp
@@ -968,7 +968,7 @@ bool x86AssemblyInspectionEngine::GetNonCallSiteUnwindPlanFromAssembly(
UnwindPlan::RowSP prologue_completed_row; // copy of prologue row of CFI
int prologue_completed_sp_bytes_offset_from_cfa; // The sp value before the
// epilogue started executed
- bool prologue_completed_is_aligned;
+ bool prologue_completed_is_aligned = false;
std::vector<bool> prologue_completed_saved_registers;
while (current_func_text_offset < size) {
diff --git a/contrib/llvm-project/lldb/source/Symbol/Type.cpp b/contrib/llvm-project/lldb/source/Symbol/Type.cpp
index 4ee5a3e76ae4..f83bdcdc1c74 100644
--- a/contrib/llvm-project/lldb/source/Symbol/Type.cpp
+++ b/contrib/llvm-project/lldb/source/Symbol/Type.cpp
@@ -162,8 +162,8 @@ Type::Type(lldb::user_id_t uid, SymbolFile *symbol_file, ConstString name,
}
Type::Type()
- : std::enable_shared_from_this<Type>(), UserID(0),
- m_name("<INVALID TYPE>") {
+ : std::enable_shared_from_this<Type>(), UserID(0), m_name("<INVALID TYPE>"),
+ m_payload(0) {
m_byte_size = 0;
m_byte_size_has_value = false;
}
diff --git a/contrib/llvm-project/lldb/source/Target/Process.cpp b/contrib/llvm-project/lldb/source/Target/Process.cpp
index 046706637691..b2f1318ca91d 100644
--- a/contrib/llvm-project/lldb/source/Target/Process.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Process.cpp
@@ -433,7 +433,8 @@ Process::Process(lldb::TargetSP target_sp, ListenerSP listener_sp,
m_profile_data_comm_mutex(), m_profile_data(), m_iohandler_sync(0),
m_memory_cache(*this), m_allocated_memory_cache(*this),
m_should_detach(false), m_next_event_action_up(), m_public_run_lock(),
- m_private_run_lock(), m_finalizing(false),
+ m_private_run_lock(), m_currently_handling_do_on_removals(false),
+ m_resume_requested(false), m_finalizing(false),
m_clear_thread_plans_on_stop(false), m_force_next_event_delivery(false),
m_last_broadcast_state(eStateInvalid), m_destroy_in_process(false),
m_can_interpret_function_calls(false), m_run_thread_plan_lock(),
@@ -2566,8 +2567,8 @@ Status Process::LaunchPrivate(ProcessLaunchInfo &launch_info, StateType &state,
if (state == eStateStopped || state == eStateCrashed) {
DidLaunch();
-
- // Now that we know the process type, update its signal responses from the
+
+ // Now that we know the process type, update its signal responses from the
// ones stored in the Target:
if (m_unix_signals_sp) {
StreamSP warning_strm = GetTarget().GetDebugger().GetAsyncErrorStream();
@@ -2935,7 +2936,7 @@ void Process::CompleteAttach() {
}
}
}
- // Now that we know the process type, update its signal responses from the
+ // Now that we know the process type, update its signal responses from the
// ones stored in the Target:
if (m_unix_signals_sp) {
StreamSP warning_strm = GetTarget().GetDebugger().GetAsyncErrorStream();
@@ -4550,9 +4551,9 @@ public:
private:
lldb::ThreadPlanSP m_thread_plan_sp;
bool m_already_reset = false;
- bool m_private;
- bool m_is_controlling;
- bool m_okay_to_discard;
+ bool m_private = false;
+ bool m_is_controlling = false;
+ bool m_okay_to_discard = false;
};
} // anonymous namespace
diff --git a/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp b/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
index a0f97d7e7cff..2da40ba2bf61 100644
--- a/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/contrib/llvm-project/lldb/source/Target/RegisterContextUnwind.cpp
@@ -1525,7 +1525,7 @@ RegisterContextUnwind::SavedLocationForRegister(
// unwindplan_regloc has valid contents about where to retrieve the register
if (unwindplan_regloc.IsUnspecified()) {
- lldb_private::UnwindLLDB::RegisterLocation new_regloc;
+ lldb_private::UnwindLLDB::RegisterLocation new_regloc = {};
new_regloc.type = UnwindLLDB::RegisterLocation::eRegisterNotSaved;
m_registers[regnum.GetAsKind(eRegisterKindLLDB)] = new_regloc;
UnwindLogMsg("save location for %s (%d) is unspecified, continue searching",
@@ -1731,7 +1731,7 @@ bool RegisterContextUnwind::TryFallbackUnwindPlan() {
addr_t old_caller_pc_value = LLDB_INVALID_ADDRESS;
addr_t new_caller_pc_value = LLDB_INVALID_ADDRESS;
- UnwindLLDB::RegisterLocation regloc;
+ UnwindLLDB::RegisterLocation regloc = {};
if (SavedLocationForRegister(pc_regnum.GetAsKind(eRegisterKindLLDB),
regloc) ==
UnwindLLDB::RegisterSearchResult::eRegisterFound) {
diff --git a/contrib/llvm-project/lldb/source/Target/StackFrame.cpp b/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
index e87cf5af3e39..4fb5ba0b735e 100644
--- a/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
+++ b/contrib/llvm-project/lldb/source/Target/StackFrame.cpp
@@ -1145,26 +1145,34 @@ bool StackFrame::HasDebugInformation() {
ValueObjectSP
StackFrame::GetValueObjectForFrameVariable(const VariableSP &variable_sp,
DynamicValueType use_dynamic) {
- std::lock_guard<std::recursive_mutex> guard(m_mutex);
ValueObjectSP valobj_sp;
- if (IsHistorical()) {
- return valobj_sp;
- }
- VariableList *var_list = GetVariableList(true);
- if (var_list) {
- // Make sure the variable is a frame variable
- const uint32_t var_idx = var_list->FindIndexForVariable(variable_sp.get());
- const uint32_t num_variables = var_list->GetSize();
- if (var_idx < num_variables) {
- valobj_sp = m_variable_list_value_objects.GetValueObjectAtIndex(var_idx);
- if (!valobj_sp) {
- if (m_variable_list_value_objects.GetSize() < num_variables)
- m_variable_list_value_objects.Resize(num_variables);
- valobj_sp = ValueObjectVariable::Create(this, variable_sp);
- m_variable_list_value_objects.SetValueObjectAtIndex(var_idx, valobj_sp);
+ { // Scope for stack frame mutex. We need to drop this mutex before we figure
+ // out the dynamic value. That will require converting the StackID in the
+ // VO back to a StackFrame, which will in turn require locking the
+ // StackFrameList. If we still hold the StackFrame mutex, we could suffer
+ // lock inversion against the pattern of getting the StackFrameList and
+ // then the stack frame, which is fairly common.
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
+ if (IsHistorical()) {
+ return valobj_sp;
+ }
+ VariableList *var_list = GetVariableList(true);
+ if (var_list) {
+ // Make sure the variable is a frame variable
+ const uint32_t var_idx = var_list->FindIndexForVariable(variable_sp.get());
+ const uint32_t num_variables = var_list->GetSize();
+ if (var_idx < num_variables) {
+ valobj_sp = m_variable_list_value_objects.GetValueObjectAtIndex(var_idx);
+ if (!valobj_sp) {
+ if (m_variable_list_value_objects.GetSize() < num_variables)
+ m_variable_list_value_objects.Resize(num_variables);
+ valobj_sp = ValueObjectVariable::Create(this, variable_sp);
+ m_variable_list_value_objects.SetValueObjectAtIndex(var_idx,
+ valobj_sp);
+ }
}
}
- }
+ } // End of StackFrame mutex scope.
if (use_dynamic != eNoDynamicValues && valobj_sp) {
ValueObjectSP dynamic_sp = valobj_sp->GetDynamicValue(use_dynamic);
if (dynamic_sp)
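The narrowed lock scope above exists because GetDynamicValue may need to re-resolve a StackFrame via the StackFrameList, whose mutex is normally taken before the frame's; holding m_mutex across that call risks lock inversion. A generic sketch of the same scoping pattern with std::mutex (names here are illustrative, not LLDB's):

#include <mutex>

std::mutex FrameMutex;   // stand-in for the StackFrame mutex
int CachedValue = 41;

int DeriveFurther(int V); // may take other locks; keep it outside the guard

int LookupAndDerive() {
  int V;
  { // Scope the guard so FrameMutex is released before calling code that
    // acquires other locks in a different order (the inversion hazard).
    std::lock_guard<std::mutex> Guard(FrameMutex);
    V = CachedValue;
  }
  return DeriveFurther(V);
}

int DeriveFurther(int V) { return V + 1; } // stand-in for GetDynamicValue
int main() { return LookupAndDerive() == 42 ? 0 : 1; }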
diff --git a/contrib/llvm-project/lldb/source/Target/ThreadPlanCallFunction.cpp b/contrib/llvm-project/lldb/source/Target/ThreadPlanCallFunction.cpp
index a9f774aa6109..7e9bb963bb5d 100644
--- a/contrib/llvm-project/lldb/source/Target/ThreadPlanCallFunction.cpp
+++ b/contrib/llvm-project/lldb/source/Target/ThreadPlanCallFunction.cpp
@@ -104,7 +104,10 @@ ThreadPlanCallFunction::ThreadPlanCallFunction(
m_ignore_breakpoints(options.DoesIgnoreBreakpoints()),
m_debug_execution(options.GetDebug()),
m_trap_exceptions(options.GetTrapExceptions()), m_function_addr(function),
- m_function_sp(0), m_takedown_done(false),
+ m_start_addr(), m_function_sp(0), m_subplan_sp(),
+ m_cxx_language_runtime(nullptr), m_objc_language_runtime(nullptr),
+ m_stored_thread_state(), m_real_stop_info_sp(), m_constructor_errors(),
+ m_return_valobj_sp(), m_takedown_done(false),
m_should_clear_objc_exception_bp(false),
m_should_clear_cxx_exception_bp(false),
m_stop_address(LLDB_INVALID_ADDRESS), m_return_type(return_type) {
@@ -134,7 +137,10 @@ ThreadPlanCallFunction::ThreadPlanCallFunction(
m_ignore_breakpoints(options.DoesIgnoreBreakpoints()),
m_debug_execution(options.GetDebug()),
m_trap_exceptions(options.GetTrapExceptions()), m_function_addr(function),
- m_function_sp(0), m_takedown_done(false),
+ m_start_addr(), m_function_sp(0), m_subplan_sp(),
+ m_cxx_language_runtime(nullptr), m_objc_language_runtime(nullptr),
+ m_stored_thread_state(), m_real_stop_info_sp(), m_constructor_errors(),
+ m_return_valobj_sp(), m_takedown_done(false),
m_should_clear_objc_exception_bp(false),
m_should_clear_cxx_exception_bp(false),
m_stop_address(LLDB_INVALID_ADDRESS), m_return_type(CompilerType()) {}
diff --git a/contrib/llvm-project/lldb/source/Target/ThreadPlanTracer.cpp b/contrib/llvm-project/lldb/source/Target/ThreadPlanTracer.cpp
index f5331428038b..7e0925307644 100644
--- a/contrib/llvm-project/lldb/source/Target/ThreadPlanTracer.cpp
+++ b/contrib/llvm-project/lldb/source/Target/ThreadPlanTracer.cpp
@@ -36,11 +36,11 @@ using namespace lldb_private;
ThreadPlanTracer::ThreadPlanTracer(Thread &thread, lldb::StreamSP &stream_sp)
: m_process(*thread.GetProcess().get()), m_tid(thread.GetID()),
- m_enabled(false), m_stream_sp(stream_sp) {}
+ m_enabled(false), m_stream_sp(stream_sp), m_thread(nullptr) {}
ThreadPlanTracer::ThreadPlanTracer(Thread &thread)
: m_process(*thread.GetProcess().get()), m_tid(thread.GetID()),
- m_enabled(false), m_stream_sp() {}
+ m_enabled(false), m_stream_sp(), m_thread(nullptr) {}
Stream *ThreadPlanTracer::GetLogStream() {
if (m_stream_sp)
diff --git a/contrib/llvm-project/lldb/source/Target/TraceCursor.cpp b/contrib/llvm-project/lldb/source/Target/TraceCursor.cpp
index f99b0d28c154..de3f9bf1b33d 100644
--- a/contrib/llvm-project/lldb/source/Target/TraceCursor.cpp
+++ b/contrib/llvm-project/lldb/source/Target/TraceCursor.cpp
@@ -50,6 +50,8 @@ const char *TraceCursor::EventKindToString(lldb::TraceEvent event_kind) {
return "software disabled tracing";
case lldb::eTraceEventCPUChanged:
return "CPU core changed";
+ case lldb::eTraceEventHWClockTick:
+ return "HW clock tick";
}
llvm_unreachable("Fully covered switch above");
}
diff --git a/contrib/llvm-project/lldb/source/Target/TraceDumper.cpp b/contrib/llvm-project/lldb/source/Target/TraceDumper.cpp
index 739105e9e9fb..5b71e9e4e97a 100644
--- a/contrib/llvm-project/lldb/source/Target/TraceDumper.cpp
+++ b/contrib/llvm-project/lldb/source/Target/TraceDumper.cpp
@@ -128,16 +128,26 @@ public:
m_s.Format(" {0}: ", item.id);
- if (m_options.show_tsc) {
- m_s.Format("[tsc={0}] ",
- item.tsc ? std::to_string(*item.tsc) : "unavailable");
+ if (m_options.show_timestamps) {
+ m_s.Format("[{0}] ", item.timestamp
+ ? formatv("{0:3} ns", *item.timestamp).str()
+ : "unavailable");
}
if (item.event) {
m_s << "(event) " << TraceCursor::EventKindToString(*item.event);
- if (*item.event == eTraceEventCPUChanged) {
+ switch (*item.event) {
+ case eTraceEventCPUChanged:
m_s.Format(" [new CPU={0}]",
item.cpu_id ? std::to_string(*item.cpu_id) : "unavailable");
+ break;
+ case eTraceEventHWClockTick:
+ m_s.Format(" [{0}]", item.hw_clock ? std::to_string(*item.hw_clock)
+ : "unavailable");
+ break;
+ case eTraceEventDisabledHW:
+ case eTraceEventDisabledSW:
+ break;
}
} else if (item.error) {
m_s << "(error) " << *item.error;
@@ -181,7 +191,8 @@ class OutputWriterJSON : public TraceDumper::OutputWriter {
| {
"loadAddress": string decimal,
"id": decimal,
- "tsc"?: string decimal,
+ "hwClock"?: string decimal,
+ "timestamp_ns"?: string decimal,
"module"?: string,
"symbol"?: string,
"line"?: decimal,
@@ -202,8 +213,17 @@ public:
void DumpEvent(const TraceDumper::TraceItem &item) {
m_j.attribute("event", TraceCursor::EventKindToString(*item.event));
- if (item.event == eTraceEventCPUChanged)
+ switch (*item.event) {
+ case eTraceEventCPUChanged:
m_j.attribute("cpuId", item.cpu_id);
+ break;
+ case eTraceEventHWClockTick:
+ m_j.attribute("hwClock", item.hw_clock);
+ break;
+ case eTraceEventDisabledHW:
+ case eTraceEventDisabledSW:
+ break;
+ }
}
void DumpInstruction(const TraceDumper::TraceItem &item) {
@@ -234,10 +254,11 @@ public:
void TraceItem(const TraceDumper::TraceItem &item) override {
m_j.object([&] {
m_j.attribute("id", item.id);
- if (m_options.show_tsc)
- m_j.attribute(
- "tsc",
- item.tsc ? Optional<std::string>(std::to_string(*item.tsc)) : None);
+ if (m_options.show_timestamps)
+ m_j.attribute("timestamp_ns", item.timestamp
+ ? Optional<std::string>(
+ std::to_string(*item.timestamp))
+ : None);
if (item.event) {
DumpEvent(item);
@@ -286,11 +307,11 @@ TraceDumper::TraceDumper(lldb::TraceCursorUP &&cursor_up, Stream &s,
}
TraceDumper::TraceItem TraceDumper::CreatRawTraceItem() {
- TraceItem item;
+ TraceItem item = {};
item.id = m_cursor_up->GetId();
- if (m_options.show_tsc)
- item.tsc = m_cursor_up->GetCounter(lldb::eTraceCounterTSC);
+ if (m_options.show_timestamps)
+ item.timestamp = m_cursor_up->GetWallClockTime();
return item;
}
@@ -366,8 +387,17 @@ Optional<lldb::user_id_t> TraceDumper::DumpInstructions(size_t count) {
if (!m_options.show_events)
continue;
item.event = m_cursor_up->GetEventType();
- if (*item.event == eTraceEventCPUChanged)
+ switch (*item.event) {
+ case eTraceEventCPUChanged:
item.cpu_id = m_cursor_up->GetCPU();
+ break;
+ case eTraceEventHWClockTick:
+ item.hw_clock = m_cursor_up->GetHWClock();
+ break;
+ case eTraceEventDisabledHW:
+ case eTraceEventDisabledSW:
+ break;
+ }
} else if (m_cursor_up->IsError()) {
item.error = m_cursor_up->GetError();
} else {
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm-project/llvm/include/llvm/ADT/DenseMap.h
index c14414c46419..a6d19d342822 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/DenseMap.h
@@ -907,6 +907,8 @@ class SmallDenseMap
public:
explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ if (NumInitBuckets > InlineBuckets)
+ NumInitBuckets = NextPowerOf2(NumInitBuckets - 1);
init(NumInitBuckets);
}
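The SmallDenseMap constructor change rounds an explicit bucket request up to a power of two once it exceeds the inline capacity, since the heap-backed representation expects power-of-two bucket counts. A small standalone check of the rounding, with a local helper standing in for llvm::NextPowerOf2 (assumed to return the smallest power of two strictly greater than its argument):

#include <cstdint>
#include <cstdio>

// Smallest power of two strictly greater than A (mirrors llvm::NextPowerOf2).
static uint64_t nextPowerOf2(uint64_t A) {
  A |= (A >> 1); A |= (A >> 2); A |= (A >> 4);
  A |= (A >> 8); A |= (A >> 16); A |= (A >> 32);
  return A + 1;
}

int main() {
  const unsigned InlineBuckets = 4; // hypothetical inline capacity
  for (unsigned Req : {0u, 3u, 4u, 5u, 24u}) {
    unsigned Buckets = Req;
    if (Buckets > InlineBuckets)
      Buckets = nextPowerOf2(Buckets - 1); // round up to a power of two
    std::printf("requested %u -> %u buckets\n", Req, Buckets); // 5 -> 8, 24 -> 32
  }
}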
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h b/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
index d1615d903e98..e164dc01c840 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/Optional.h
@@ -318,7 +318,9 @@ public:
template <typename U> constexpr T value_or(U &&alt) const & {
return has_value() ? value() : std::forward<U>(alt);
}
- template <typename U> constexpr T getValueOr(U &&alt) const & {
+ template <typename U>
+ [[deprecated("Use value_or instead.")]] constexpr T
+ getValueOr(U &&alt) const & {
return has_value() ? value() : std::forward<U>(alt);
}
@@ -337,7 +339,8 @@ public:
template <typename U> T value_or(U &&alt) && {
return has_value() ? std::move(value()) : std::forward<U>(alt);
}
- template <typename U> T getValueOr(U &&alt) && {
+ template <typename U>
+ [[deprecated("Use value_or instead.")]] T getValueOr(U &&alt) && {
return has_value() ? std::move(value()) : std::forward<U>(alt);
}
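getValueOr is kept but tagged [[deprecated]], so existing callers still compile while being pointed at value_or. A self-contained illustration of that migration pattern using a hypothetical Option type rather than llvm::Optional itself:

#include <cstdio>
#include <utility>

template <typename T> class Option {
  bool HasValue = false;
  T Val{};
public:
  Option() = default;
  Option(T V) : HasValue(true), Val(V) {}
  bool has_value() const { return HasValue; }

  template <typename U> T value_or(U &&Alt) const {
    return HasValue ? Val : static_cast<T>(std::forward<U>(Alt));
  }
  // Keep the old spelling but steer callers to the new one at compile time.
  template <typename U>
  [[deprecated("Use value_or instead.")]] T getValueOr(U &&Alt) const {
    return value_or(std::forward<U>(Alt));
  }
};

int main() {
  Option<int> None, Some(42);
  std::printf("%d %d\n", None.value_or(7), Some.value_or(7)); // 7 42
  // Some.getValueOr(7); // would still compile, but warn: "Use value_or instead."
}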
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h b/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
index 7649e630b23d..bc599cb1f9a1 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/DDG.h
@@ -348,37 +348,37 @@ public:
DDGBuilder(DataDependenceGraph &G, DependenceInfo &D,
const BasicBlockListType &BBs)
: AbstractDependenceGraphBuilder(G, D, BBs) {}
- DDGNode &createRootNode() final override {
+ DDGNode &createRootNode() final {
auto *RN = new RootDDGNode();
assert(RN && "Failed to allocate memory for DDG root node.");
Graph.addNode(*RN);
return *RN;
}
- DDGNode &createFineGrainedNode(Instruction &I) final override {
+ DDGNode &createFineGrainedNode(Instruction &I) final {
auto *SN = new SimpleDDGNode(I);
assert(SN && "Failed to allocate memory for simple DDG node.");
Graph.addNode(*SN);
return *SN;
}
- DDGNode &createPiBlock(const NodeListType &L) final override {
+ DDGNode &createPiBlock(const NodeListType &L) final {
auto *Pi = new PiBlockDDGNode(L);
assert(Pi && "Failed to allocate memory for pi-block node.");
Graph.addNode(*Pi);
return *Pi;
}
- DDGEdge &createDefUseEdge(DDGNode &Src, DDGNode &Tgt) final override {
+ DDGEdge &createDefUseEdge(DDGNode &Src, DDGNode &Tgt) final {
auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::RegisterDefUse);
assert(E && "Failed to allocate memory for edge");
Graph.connect(Src, Tgt, *E);
return *E;
}
- DDGEdge &createMemoryEdge(DDGNode &Src, DDGNode &Tgt) final override {
+ DDGEdge &createMemoryEdge(DDGNode &Src, DDGNode &Tgt) final {
auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::MemoryDependence);
assert(E && "Failed to allocate memory for edge");
Graph.connect(Src, Tgt, *E);
return *E;
}
- DDGEdge &createRootedEdge(DDGNode &Src, DDGNode &Tgt) final override {
+ DDGEdge &createRootedEdge(DDGNode &Src, DDGNode &Tgt) final {
auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::Rooted);
assert(E && "Failed to allocate memory for edge");
assert(isa<RootDDGNode>(Src) && "Expected root node");
@@ -386,7 +386,7 @@ public:
return *E;
}
- const NodeListType &getNodesInPiBlock(const DDGNode &N) final override {
+ const NodeListType &getNodesInPiBlock(const DDGNode &N) final {
auto *PiNode = dyn_cast<const PiBlockDDGNode>(&N);
assert(PiNode && "Expected a pi-block node.");
return PiNode->getNodes();
@@ -394,11 +394,10 @@ public:
/// Return true if the two nodes \pSrc and \pTgt are both simple nodes and
/// the consecutive instructions after merging belong to the same basic block.
- bool areNodesMergeable(const DDGNode &Src,
- const DDGNode &Tgt) const final override;
- void mergeNodes(DDGNode &Src, DDGNode &Tgt) final override;
- bool shouldSimplify() const final override;
- bool shouldCreatePiBlocks() const final override;
+ bool areNodesMergeable(const DDGNode &Src, const DDGNode &Tgt) const final;
+ void mergeNodes(DDGNode &Src, DDGNode &Tgt) final;
+ bool shouldSimplify() const final;
+ bool shouldCreatePiBlocks() const final;
};
raw_ostream &operator<<(raw_ostream &OS, const DDGNode &N);
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 422f63db749f..5cc85c4677cb 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -30,7 +30,6 @@ namespace llvm {
class AllocaInst;
class AAResults;
class Argument;
-class CallInst;
class ConstantPointerNull;
class DataLayout;
class ExtractElementInst;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
index da4410fcac14..6ea6d2361eba 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -49,7 +49,6 @@ class InstCombiner;
class OptimizationRemarkEmitter;
class IntrinsicInst;
class LoadInst;
-class LoopAccessInfo;
class Loop;
class LoopInfo;
class LoopVectorizationLegality;
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
index 1fd025761127..a0bb50db8c54 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
@@ -900,6 +900,26 @@ enum {
#include "ELFRelocs/CSKY.def"
};
+// LoongArch Specific e_flags
+enum : unsigned {
+ // Reference: https://github.com/loongson/LoongArch-Documentation.
+ // The last commit hash (main branch) is
+ // 99016636af64d02dee05e39974d4c1e55875c45b.
+ // Note that there is an open PR
+ // https://github.com/loongson/LoongArch-Documentation/pull/47
+ // talking about using 0x1, 0x2, 0x3 for ILP32S/F/D and using EI_CLASS to
+ // distinguish LP64 and ILP32. If this PR gets merged, we will update
+ // the definition here.
+ // Base ABI Types.
+ EF_LOONGARCH_BASE_ABI_LP64S = 0x1, // LP64 soft-float ABI
+ EF_LOONGARCH_BASE_ABI_LP64F = 0x2, // LP64 single-float ABI
+ EF_LOONGARCH_BASE_ABI_LP64D = 0x3, // LP64 double-float ABI
+ EF_LOONGARCH_BASE_ABI_ILP32S = 0x5, // ILP32 soft-float ABI
+ EF_LOONGARCH_BASE_ABI_ILP32F = 0x6, // ILP32 single-float ABI
+ EF_LOONGARCH_BASE_ABI_ILP32D = 0x7, // ILP32 double-float ABI
+ EF_LOONGARCH_BASE_ABI_MASK = 0x7, // Mask for selecting base ABI
+};
+
// ELF Relocation types for LoongArch
enum {
#include "ELFRelocs/LoongArch.def"
@@ -1366,6 +1386,8 @@ enum {
// These all contain stack unwind tables.
PT_ARM_EXIDX = 0x70000001,
PT_ARM_UNWIND = 0x70000001,
+ // MTE memory tag segment type
+ PT_AARCH64_MEMTAG_MTE = 0x70000002,
// MIPS program header types.
PT_MIPS_REGINFO = 0x70000000, // Register usage information.
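The base ABI occupies the low three bits of e_flags, which is why EF_LOONGARCH_BASE_ABI_MASK is 0x7. A minimal sketch of decoding it from an ELF header's e_flags (constants copied from the enum above; the printable names are just illustrative):

#include <cstdint>
#include <cstdio>

// Values as added to llvm/BinaryFormat/ELF.h above.
enum : unsigned {
  EF_LOONGARCH_BASE_ABI_LP64S  = 0x1,
  EF_LOONGARCH_BASE_ABI_LP64F  = 0x2,
  EF_LOONGARCH_BASE_ABI_LP64D  = 0x3,
  EF_LOONGARCH_BASE_ABI_ILP32S = 0x5,
  EF_LOONGARCH_BASE_ABI_ILP32F = 0x6,
  EF_LOONGARCH_BASE_ABI_ILP32D = 0x7,
  EF_LOONGARCH_BASE_ABI_MASK   = 0x7,
};

static const char *baseABIName(uint32_t EFlags) {
  switch (EFlags & EF_LOONGARCH_BASE_ABI_MASK) {
  case EF_LOONGARCH_BASE_ABI_LP64S:  return "lp64s";
  case EF_LOONGARCH_BASE_ABI_LP64F:  return "lp64f";
  case EF_LOONGARCH_BASE_ABI_LP64D:  return "lp64d";
  case EF_LOONGARCH_BASE_ABI_ILP32S: return "ilp32s";
  case EF_LOONGARCH_BASE_ABI_ILP32F: return "ilp32f";
  case EF_LOONGARCH_BASE_ABI_ILP32D: return "ilp32d";
  default:                           return "unknown";
  }
}

int main() { std::printf("%s\n", baseABIName(0x43)); } // prints "lp64d"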
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h
index dd213bf68e62..87dae64c5f90 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -39,14 +39,14 @@ public:
class CSEConfigFull : public CSEConfigBase {
public:
virtual ~CSEConfigFull() = default;
- virtual bool shouldCSEOpc(unsigned Opc) override;
+ bool shouldCSEOpc(unsigned Opc) override;
};
// Commonly used for O0 config.
class CSEConfigConstantOnly : public CSEConfigBase {
public:
virtual ~CSEConfigConstantOnly() = default;
- virtual bool shouldCSEOpc(unsigned Opc) override;
+ bool shouldCSEOpc(unsigned Opc) override;
};
// Returns the standard expected CSEConfig for the given optimization level.
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index f7fafdc57401..e40f00433870 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -590,7 +590,7 @@ private:
assert(irt && "irt is null!");
}
- virtual void addSuccessorWithProb(
+ void addSuccessorWithProb(
MachineBasicBlock *Src, MachineBasicBlock *Dst,
BranchProbability Prob = BranchProbability::getUnknown()) override {
IRT->addSuccessorWithProb(Src, Dst, Prob);
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveIntervals.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveIntervals.h
index b26aa773c9ea..0c846c6671a7 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveIntervals.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/LiveIntervals.h
@@ -39,7 +39,6 @@ namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs;
-class AAResults;
class BitVector;
class LiveIntervalCalc;
class MachineBlockFrequencyInfo;
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index c531ddf8e906..389fbce72ad0 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -53,6 +53,7 @@
#include <iterator>
#include <string>
#include <tuple>
+#include <utility>
namespace llvm {
@@ -2079,6 +2080,11 @@ public:
bool isConstant() const;
+ /// If this BuildVector is constant and represents the numerical series
+ /// <a, a+n, a+2n, a+3n, ...> where a is an integer and n is a non-zero integer,
+ /// the value <a,n> is returned.
+ Optional<std::pair<APInt, APInt>> isConstantSequence() const;
+
/// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
/// Undef elements are treated as zero, and entirely undefined elements are
/// flagged in \p DstUndefElements.
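isConstantSequence asks whether a constant BUILD_VECTOR forms an arithmetic progression; for example <3, 5, 7, 9> would yield the pair <a=3, n=2>, while <3, 5, 8, 9> would not. A standalone sketch of the same check over plain integers, not the SelectionDAG API:

#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Return <a, n> if Elts is a, a+n, a+2n, ... with n != 0, otherwise nullopt.
static std::optional<std::pair<int64_t, int64_t>>
asConstantSequence(const std::vector<int64_t> &Elts) {
  if (Elts.size() < 2)
    return std::nullopt;
  int64_t A = Elts[0], N = Elts[1] - Elts[0];
  if (N == 0)
    return std::nullopt;
  for (size_t I = 1; I < Elts.size(); ++I)
    if (Elts[I] != A + static_cast<int64_t>(I) * N)
      return std::nullopt;
  return std::make_pair(A, N);
}

int main() {
  auto Seq = asConstantSequence({3, 5, 7, 9}); // <3, 2>
  auto Not = asConstantSequence({3, 5, 8, 9}); // nullopt
  return (Seq && !Not) ? 0 : 1;
}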
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index a1c9061baee6..72f69f4c6b77 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -38,7 +38,6 @@
namespace llvm {
-class AAResults;
class DFAPacketizer;
class InstrItineraryData;
class LiveIntervals;
diff --git a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
index 3961100e00e1..4729e5f806d8 100644
--- a/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
+++ b/contrib/llvm-project/llvm/include/llvm/DWARFLinker/DWARFLinker.h
@@ -245,7 +245,7 @@ public:
/// Link debug info for added objFiles. Object
/// files are linked all together.
- bool link();
+ Error link();
/// A number of methods setting various linking options:
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
index 6cd5c8d1d668..93525b716791 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
@@ -32,15 +32,12 @@ public:
template <typename ChildType>
class NullEnumerator : public IPDBEnumChildren<ChildType> {
- virtual uint32_t getChildCount() const override { return 0; }
- virtual std::unique_ptr<ChildType>
- getChildAtIndex(uint32_t Index) const override {
+ uint32_t getChildCount() const override { return 0; }
+ std::unique_ptr<ChildType> getChildAtIndex(uint32_t Index) const override {
return nullptr;
}
- virtual std::unique_ptr<ChildType> getNext() override {
- return nullptr;
- }
- virtual void reset() override {}
+ std::unique_ptr<ChildType> getNext() override { return nullptr; }
+ void reset() override {}
};
} // end namespace pdb
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
index 9a2bc9b09350..d68dccac3fa9 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/JITLink/x86_64.h
@@ -126,6 +126,24 @@ enum EdgeKind_x86_64 : Edge::Kind {
///
BranchPCRel32,
+ /// A 32-bit PC-relative relocation.
+ ///
+ /// Represents a data/control flow instruction using PC-relative addressing
+ /// to a target.
+ ///
+ /// The fixup expression for this kind includes an implicit offset to account
+ /// for the PC (unlike the Delta edges) so that a PCRel32 with a target
+ /// T and addend zero is a call/branch to the start (offset zero) of T.
+ ///
+ /// Fixup expression:
+ /// Fixup <- Target - (Fixup + 4) + Addend : int32
+ ///
+ /// Errors:
+ /// - The result of the fixup expression must fit into an int32, otherwise
+ /// an out-of-range error will be returned.
+ ///
+ PCRel32,
+
/// A 32-bit PC-relative branch to a pointer jump stub.
///
/// The target of this relocation should be a pointer jump stub of the form:
@@ -343,7 +361,9 @@ enum EdgeKind_x86_64 : Edge::Kind {
/// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
/// phase will result in an assert/unreachable during the fixup phase.
///
- RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable
+ RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable,
+ // First platform specific relocation.
+ FirstPlatformRelocation
};
/// Returns a string name for the given x86-64 edge. For debugging purposes
@@ -395,6 +415,7 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
break;
}
+ case PCRel32:
case BranchPCRel32:
case BranchPCRel32ToPtrJumpStub:
case BranchPCRel32ToPtrJumpStubBypassable:
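The PCRel32 edge documents its fixup as Fixup <- Target - (Fixup + 4) + Addend, i.e. the stored 32-bit value is measured from the end of the 4-byte field. A small worked sketch of that computation with the int32 range check, using plain integers rather than the JITLink API:

#include <cstdint>
#include <cstdio>
#include <limits>

// Value to store at FixupAddr for a PC-relative 32-bit reference to Target.
static bool computePCRel32(uint64_t FixupAddr, uint64_t Target, int64_t Addend,
                           int32_t &Out) {
  int64_t Value = static_cast<int64_t>(Target) -
                  static_cast<int64_t>(FixupAddr + 4) + Addend;
  if (Value < std::numeric_limits<int32_t>::min() ||
      Value > std::numeric_limits<int32_t>::max())
    return false; // the real fixup code reports an out-of-range error here
  Out = static_cast<int32_t>(Value);
  return true;
}

int main() {
  int32_t V;
  if (computePCRel32(0x1000, 0x2000, 0, V))
    std::printf("0x%x\n", static_cast<unsigned>(V)); // 0xffc
}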
diff --git a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
index 69d8cf5d2980..37fe5a98b093 100644
--- a/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
+++ b/contrib/llvm-project/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE
-#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE
+#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H
+#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
@@ -75,4 +75,4 @@ private:
} // namespace rt_bootstrap
} // namespace orc
} // namespace llvm
-#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE
+#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h b/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
index 083fed5de4a3..480a559e2226 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Instructions.h
@@ -848,6 +848,8 @@ public:
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
+ assert(Ordering != AtomicOrdering::Unordered &&
+ "atomicrmw instructions cannot be unordered.");
setSubclassData<AtomicOrderingField>(Ordering);
}
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
index fc9111a4f512..4ff48c3669d5 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicInst.h
@@ -84,7 +84,7 @@ public:
}
}
- // Checks if the intrinsic is an annotation.
+ /// Checks if the intrinsic is an annotation.
bool isAssumeLikeIntrinsic() const {
switch (getIntrinsicID()) {
default: break;
@@ -107,7 +107,11 @@ public:
return false;
}
- // Methods for support type inquiry through isa, cast, and dyn_cast:
+ /// Check if the intrinsic might lower into a regular function call in the
+ /// course of IR transformations
+ static bool mayLowerToFunctionCall(Intrinsic::ID IID);
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const CallInst *I) {
if (const Function *CF = I->getCalledFunction())
return CF->isIntrinsic();
@@ -1356,9 +1360,6 @@ public:
}
};
-// Defined in Statepoint.h -- NOT a subclass of IntrinsicInst
-class GCStatepointInst;
-
/// Common base class for representing values projected from a statepoint.
/// Currently, the only projections available are gc.result and gc.relocate.
class GCProjectionInst : public IntrinsicInst {
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
index c523e3773de4..d46fa4fbf5b5 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/Intrinsics.td
@@ -1759,6 +1759,12 @@ def int_type_checked_load : DefaultAttrsIntrinsic<[llvm_ptr_ty, llvm_i1_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
[IntrNoMem, IntrWillReturn]>;
+// Test whether a pointer is associated with a type metadata identifier. Used
+// for public visibility classes that may later be refined to private
+// visibility.
+def int_public_type_test : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
+ [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
+
// Create a branch funnel that implements an indirect call to a limited set of
// callees. This needs to be a musttail call.
def int_icall_branch_funnel : DefaultAttrsIntrinsic<[], [llvm_vararg_ty], []>;
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h b/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
index f1dd29926278..468773ac5909 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -1122,6 +1122,9 @@ private:
/// every summary of a GV is synchronized.
bool WithDSOLocalPropagation = false;
+ /// Indicates that we have whole program visibility.
+ bool WithWholeProgramVisibility = false;
+
/// Indicates that summary-based synthetic entry count propagation has run
bool HasSyntheticEntryCounts = false;
@@ -1280,6 +1283,9 @@ public:
bool withDSOLocalPropagation() const { return WithDSOLocalPropagation; }
void setWithDSOLocalPropagation() { WithDSOLocalPropagation = true; }
+ bool withWholeProgramVisibility() const { return WithWholeProgramVisibility; }
+ void setWithWholeProgramVisibility() { WithWholeProgramVisibility = true; }
+
bool isReadOnly(const GlobalVarSummary *GVS) const {
return WithAttributePropagation && GVS->maybeReadOnly();
}
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/PrintPasses.h b/contrib/llvm-project/llvm/include/llvm/IR/PrintPasses.h
index 1fa7c1893e20..e721db999341 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/PrintPasses.h
+++ b/contrib/llvm-project/llvm/include/llvm/IR/PrintPasses.h
@@ -10,10 +10,25 @@
#define LLVM_IR_PRINTPASSES_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/CommandLine.h"
#include <vector>
namespace llvm {
+enum class ChangePrinter {
+ None,
+ Verbose,
+ Quiet,
+ DiffVerbose,
+ DiffQuiet,
+ ColourDiffVerbose,
+ ColourDiffQuiet,
+ DotCfgVerbose,
+ DotCfgQuiet
+};
+
+extern cl::opt<ChangePrinter> PrintChanged;
+
// Returns true if printing before/after some pass is enabled, whether all
// passes or a specific pass.
bool shouldPrintBeforeSomePass();
diff --git a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
index 96f82a9276e0..fdd6ec74bce6 100644
--- a/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
+++ b/contrib/llvm-project/llvm/include/llvm/LTO/legacy/LTOCodeGenerator.h
@@ -102,6 +102,9 @@ struct LTOCodeGenerator {
void setShouldInternalize(bool Value) { ShouldInternalize = Value; }
void setShouldEmbedUselists(bool Value) { ShouldEmbedUselists = Value; }
+ void setSaveIRBeforeOptPath(std::string Value) {
+ SaveIRBeforeOptPath = Value;
+ }
/// Restore linkage of globals
///
@@ -237,6 +240,7 @@ private:
bool ShouldRestoreGlobalsLinkage = false;
std::unique_ptr<ToolOutputFile> DiagnosticOutputFile;
std::unique_ptr<ToolOutputFile> StatsFile = nullptr;
+ std::string SaveIRBeforeOptPath;
lto::Config Config;
};
diff --git a/contrib/llvm-project/llvm/include/llvm/MC/MCDisassembler/MCDisassembler.h b/contrib/llvm-project/llvm/include/llvm/MC/MCDisassembler/MCDisassembler.h
index de069ff95c2f..ebe81fd8d121 100644
--- a/contrib/llvm-project/llvm/include/llvm/MC/MCDisassembler/MCDisassembler.h
+++ b/contrib/llvm-project/llvm/include/llvm/MC/MCDisassembler/MCDisassembler.h
@@ -171,6 +171,29 @@ public:
// It should help move much of the target specific code from llvm-objdump to
// respective target disassemblers.
+ /// Suggest a distance to skip in a buffer of data to find the next
+ /// place to look for the start of an instruction. For example, if
+ /// all instructions have a fixed alignment, this might advance to
+ /// the next multiple of that alignment.
+ ///
+ /// If not overridden, the default is 1.
+ ///
+ /// \param Address - The address, in the memory space of region, of the
+ /// starting point (typically the first byte of something
+ /// that did not decode as a valid instruction at all).
+ /// \param Bytes - A reference to the actual bytes at Address. May be
+ /// needed in order to determine the width of an
+ /// unrecognized instruction (e.g. in Thumb this is a simple
+ /// consistent criterion that doesn't require knowing the
+ /// specific instruction). The caller can pass as much data
+ /// as they have available, and the function is required to
+ /// make a reasonable default choice if not enough data is
+ /// available to make a better one.
+ /// \return - A number of bytes to skip. Must always be greater than
+ /// zero. May be greater than the size of Bytes.
+ virtual uint64_t suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const;
+
private:
MCContext &Ctx;
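suggestBytesToSkip lets a target tell generic disassembly drivers how far to advance after a failed decode, defaulting to 1. A hedged sketch of the policy a fixed-width 4-byte ISA might use, written as a free function rather than an MCDisassembler override (the helper name is made up; only the intent comes from the documentation above):

#include <cstddef>
#include <cstdint>

// Skip to the next naturally aligned instruction slot; always at least 1 byte.
static uint64_t suggestBytesToSkipFixedWidth(uint64_t Address,
                                             size_t BytesAvailable,
                                             uint64_t InstrSize = 4) {
  uint64_t Skip = InstrSize - (Address % InstrSize);
  (void)BytesAvailable; // the result may exceed the buffer, as documented
  return Skip;
}

int main() {
  // 0x1001 is mid-instruction on a 4-byte ISA: skip 3 bytes to reach 0x1004.
  return suggestBytesToSkipFixedWidth(0x1001, 16) == 3 ? 0 : 1;
}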
diff --git a/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h b/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
index 30287cde5de7..b4d2178f5724 100644
--- a/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
+++ b/contrib/llvm-project/llvm/include/llvm/Passes/StandardInstrumentations.h
@@ -381,13 +381,13 @@ public:
protected:
// Create a representation of the IR.
- virtual void generateIRRepresentation(Any IR, StringRef PassID,
- IRDataT<EmptyData> &Output) override;
+ void generateIRRepresentation(Any IR, StringRef PassID,
+ IRDataT<EmptyData> &Output) override;
// Called when an interesting IR has changed.
- virtual void handleAfter(StringRef PassID, std::string &Name,
- const IRDataT<EmptyData> &Before,
- const IRDataT<EmptyData> &After, Any) override;
+ void handleAfter(StringRef PassID, std::string &Name,
+ const IRDataT<EmptyData> &Before,
+ const IRDataT<EmptyData> &After, Any) override;
void handleFunctionCompare(StringRef Name, StringRef Prefix, StringRef PassID,
StringRef Divider, bool InModule, unsigned Minor,
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
index 7da336b9f61b..b96d6c70dae4 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -585,14 +585,14 @@ public:
: SampleProfileReader(std::move(B), C, Format) {}
/// Read and validate the file header.
- virtual std::error_code readHeader() override;
+ std::error_code readHeader() override;
/// Read sample profiles from the associated file.
std::error_code readImpl() override;
/// It includes all the names that have samples either in outline instance
/// or inline instance.
- virtual std::vector<StringRef> *getNameTable() override { return &NameTable; }
+ std::vector<StringRef> *getNameTable() override { return &NameTable; }
protected:
/// Read a numeric value of type T from the profile.
@@ -656,7 +656,7 @@ private:
class SampleProfileReaderRawBinary : public SampleProfileReaderBinary {
private:
- virtual std::error_code verifySPMagic(uint64_t Magic) override;
+ std::error_code verifySPMagic(uint64_t Magic) override;
public:
SampleProfileReaderRawBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
@@ -710,14 +710,14 @@ protected:
std::error_code readCSNameTableSec();
std::error_code readProfileSymbolList();
- virtual std::error_code readHeader() override;
- virtual std::error_code verifySPMagic(uint64_t Magic) override = 0;
+ std::error_code readHeader() override;
+ std::error_code verifySPMagic(uint64_t Magic) override = 0;
virtual std::error_code readOneSection(const uint8_t *Start, uint64_t Size,
const SecHdrTableEntry &Entry);
// placeholder for subclasses to dispatch their own section readers.
virtual std::error_code readCustomSection(const SecHdrTableEntry &Entry) = 0;
- virtual ErrorOr<StringRef> readStringFromTable() override;
- virtual ErrorOr<SampleContext> readSampleContextFromTable() override;
+ ErrorOr<StringRef> readStringFromTable() override;
+ ErrorOr<SampleContext> readSampleContextFromTable() override;
ErrorOr<SampleContextFrames> readContextFromTable();
std::unique_ptr<ProfileSymbolList> ProfSymList;
@@ -770,27 +770,26 @@ public:
uint64_t getSectionSize(SecType Type);
/// Get the total size of header and all sections.
uint64_t getFileSize();
- virtual bool dumpSectionInfo(raw_ostream &OS = dbgs()) override;
+ bool dumpSectionInfo(raw_ostream &OS = dbgs()) override;
/// Collect functions with definitions in Module M. Return true if
/// the reader has been given a module.
bool collectFuncsFromModule() override;
/// Return whether names in the profile are all MD5 numbers.
- virtual bool useMD5() override { return MD5StringBuf.get(); }
+ bool useMD5() override { return MD5StringBuf.get(); }
- virtual std::unique_ptr<ProfileSymbolList> getProfileSymbolList() override {
+ std::unique_ptr<ProfileSymbolList> getProfileSymbolList() override {
return std::move(ProfSymList);
};
- virtual void setSkipFlatProf(bool Skip) override { SkipFlatProf = Skip; }
+ void setSkipFlatProf(bool Skip) override { SkipFlatProf = Skip; }
};
class SampleProfileReaderExtBinary : public SampleProfileReaderExtBinaryBase {
private:
- virtual std::error_code verifySPMagic(uint64_t Magic) override;
- virtual std::error_code
- readCustomSection(const SecHdrTableEntry &Entry) override {
+ std::error_code verifySPMagic(uint64_t Magic) override;
+ std::error_code readCustomSection(const SecHdrTableEntry &Entry) override {
// Update the data reader pointer to the end of the section.
Data = End;
return sampleprof_error::success;
@@ -814,11 +813,11 @@ private:
DenseMap<StringRef, uint64_t> FuncOffsetTable;
/// The set containing the functions to use when compiling a module.
DenseSet<StringRef> FuncsToUse;
- virtual std::error_code verifySPMagic(uint64_t Magic) override;
- virtual std::error_code readNameTable() override;
+ std::error_code verifySPMagic(uint64_t Magic) override;
+ std::error_code readNameTable() override;
/// Read a string indirectly via the name table.
- virtual ErrorOr<StringRef> readStringFromTable() override;
- virtual std::error_code readHeader() override;
+ ErrorOr<StringRef> readStringFromTable() override;
+ std::error_code readHeader() override;
std::error_code readFuncOffsetTable();
public:
@@ -837,7 +836,7 @@ public:
bool collectFuncsFromModule() override;
/// Return whether names in the profile are all MD5 numbers.
- virtual bool useMD5() override { return true; }
+ bool useMD5() override { return true; }
};
using InlineCallStack = SmallVector<FunctionSamples *, 10>;
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
index aa7f1cbdd7e8..b1ed0335e9c9 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/SampleProfWriter.h
@@ -122,14 +122,13 @@ public:
SampleProfileWriterBinary(std::unique_ptr<raw_ostream> &OS)
: SampleProfileWriter(OS) {}
- virtual std::error_code writeSample(const FunctionSamples &S) override;
+ std::error_code writeSample(const FunctionSamples &S) override;
protected:
virtual MapVector<StringRef, uint32_t> &getNameTable() { return NameTable; }
virtual std::error_code writeMagicIdent(SampleProfileFormat Format);
virtual std::error_code writeNameTable();
- virtual std::error_code
- writeHeader(const SampleProfileMap &ProfileMap) override;
+ std::error_code writeHeader(const SampleProfileMap &ProfileMap) override;
std::error_code writeSummary();
virtual std::error_code writeContextIdx(const SampleContext &Context);
std::error_code writeNameIdx(StringRef FName);
@@ -187,14 +186,14 @@ const std::array<SmallVector<SecHdrTableEntry, 8>, NumOfLayout>
class SampleProfileWriterExtBinaryBase : public SampleProfileWriterBinary {
using SampleProfileWriterBinary::SampleProfileWriterBinary;
public:
- virtual std::error_code write(const SampleProfileMap &ProfileMap) override;
+ std::error_code write(const SampleProfileMap &ProfileMap) override;
- virtual void setToCompressAllSections() override;
+ void setToCompressAllSections() override;
void setToCompressSection(SecType Type);
- virtual std::error_code writeSample(const FunctionSamples &S) override;
+ std::error_code writeSample(const FunctionSamples &S) override;
// Set to use MD5 to represent string in NameTable.
- virtual void setUseMD5() override {
+ void setUseMD5() override {
UseMD5 = true;
addSectionFlag(SecNameTable, SecNameTableFlags::SecFlagMD5Name);
// MD5 will be stored as plain uint64_t instead of variable-length
@@ -205,15 +204,15 @@ public:
// Set the profile to be partial. It means the profile is for
// common/shared code. The common profile is usually merged from
// profiles collected from running other targets.
- virtual void setPartialProfile() override {
+ void setPartialProfile() override {
addSectionFlag(SecProfSummary, SecProfSummaryFlags::SecFlagPartial);
}
- virtual void setProfileSymbolList(ProfileSymbolList *PSL) override {
+ void setProfileSymbolList(ProfileSymbolList *PSL) override {
ProfSymList = PSL;
};
- virtual void resetSecLayout(SectionLayout SL) override {
+ void resetSecLayout(SectionLayout SL) override {
verifySecLayout(SL);
#ifndef NDEBUG
// Make sure resetSecLayout is called before any flag setting.
@@ -242,7 +241,7 @@ protected:
addSecFlag(SectionHdrLayout[SectionIdx], Flag);
}
- virtual void addContext(const SampleContext &Context) override;
+ void addContext(const SampleContext &Context) override;
// placeholder for subclasses to dispatch their own section writers.
virtual std::error_code writeCustomSection(SecType Type) = 0;
@@ -258,9 +257,8 @@ protected:
const SampleProfileMap &ProfileMap);
// Helper function to write name table.
- virtual std::error_code writeNameTable() override;
- virtual std::error_code
- writeContextIdx(const SampleContext &Context) override;
+ std::error_code writeNameTable() override;
+ std::error_code writeContextIdx(const SampleContext &Context) override;
std::error_code writeCSNameIdx(const SampleContext &Context);
std::error_code writeCSNameTableSection();
@@ -288,8 +286,7 @@ protected:
private:
void allocSecHdrTable();
std::error_code writeSecHdrTable();
- virtual std::error_code
- writeHeader(const SampleProfileMap &ProfileMap) override;
+ std::error_code writeHeader(const SampleProfileMap &ProfileMap) override;
std::error_code compressAndOutput();
// We will swap the raw_ostream held by LocalBufStream and that
@@ -334,14 +331,13 @@ private:
std::error_code writeDefaultLayout(const SampleProfileMap &ProfileMap);
std::error_code writeCtxSplitLayout(const SampleProfileMap &ProfileMap);
- virtual std::error_code
- writeSections(const SampleProfileMap &ProfileMap) override;
+ std::error_code writeSections(const SampleProfileMap &ProfileMap) override;
- virtual std::error_code writeCustomSection(SecType Type) override {
+ std::error_code writeCustomSection(SecType Type) override {
return sampleprof_error::success;
};
- virtual void verifySecLayout(SectionLayout SL) override {
+ void verifySecLayout(SectionLayout SL) override {
assert((SL == DefaultLayout || SL == CtxSplitLayout) &&
"Unsupported layout");
}
@@ -381,8 +377,8 @@ class SampleProfileWriterCompactBinary : public SampleProfileWriterBinary {
using SampleProfileWriterBinary::SampleProfileWriterBinary;
public:
- virtual std::error_code writeSample(const FunctionSamples &S) override;
- virtual std::error_code write(const SampleProfileMap &ProfileMap) override;
+ std::error_code writeSample(const FunctionSamples &S) override;
+ std::error_code write(const SampleProfileMap &ProfileMap) override;
protected:
/// The table mapping from function name to the offset of its FunctionSample
@@ -391,9 +387,8 @@ protected:
/// The offset of the slot to be filled with the offset of FuncOffsetTable
/// towards profile start.
uint64_t TableOffset;
- virtual std::error_code writeNameTable() override;
- virtual std::error_code
- writeHeader(const SampleProfileMap &ProfileMap) override;
+ std::error_code writeNameTable() override;
+ std::error_code writeHeader(const SampleProfileMap &ProfileMap) override;
std::error_code writeFuncOffsetTable();
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h b/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
index dc4adba26f16..9dc09b4e97f7 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/BinaryByteStream.h
@@ -192,9 +192,7 @@ public:
Error commit() override { return Error::success(); }
/// Return the properties of this stream.
- virtual BinaryStreamFlags getFlags() const override {
- return BSF_Write | BSF_Append;
- }
+ BinaryStreamFlags getFlags() const override { return BSF_Write | BSF_Append; }
MutableArrayRef<uint8_t> data() { return Data; }
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/DXILOperationCommon.h b/contrib/llvm-project/llvm/include/llvm/Support/DXILOperationCommon.h
new file mode 100644
index 000000000000..09365411b3e7
--- /dev/null
+++ b/contrib/llvm-project/llvm/include/llvm/Support/DXILOperationCommon.h
@@ -0,0 +1,63 @@
+//===-- DXILOperationCommon.h - DXIL Operation ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is created to share common definitions used by both the
+// DXILOpBuilder and the table generator.
+// Documentation for DXIL can be found in
+// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DXILOPERATIONCOMMON_H
+#define LLVM_SUPPORT_DXILOPERATIONCOMMON_H
+
+#include "llvm/ADT/StringSwitch.h"
+
+namespace llvm {
+namespace DXIL {
+
+enum class ParameterKind : uint8_t {
+ INVALID = 0,
+ VOID,
+ HALF,
+ FLOAT,
+ DOUBLE,
+ I1,
+ I8,
+ I16,
+ I32,
+ I64,
+ OVERLOAD,
+ CBUFFER_RET,
+ RESOURCE_RET,
+ DXIL_HANDLE,
+};
+
+inline ParameterKind parameterTypeNameToKind(StringRef Name) {
+ return StringSwitch<ParameterKind>(Name)
+ .Case("void", ParameterKind::VOID)
+ .Case("half", ParameterKind::HALF)
+ .Case("float", ParameterKind::FLOAT)
+ .Case("double", ParameterKind::DOUBLE)
+ .Case("i1", ParameterKind::I1)
+ .Case("i8", ParameterKind::I8)
+ .Case("i16", ParameterKind::I16)
+ .Case("i32", ParameterKind::I32)
+ .Case("i64", ParameterKind::I64)
+ .Case("$o", ParameterKind::OVERLOAD)
+ .Case("dx.types.Handle", ParameterKind::DXIL_HANDLE)
+ .Case("dx.types.CBufRet", ParameterKind::CBUFFER_RET)
+ .Case("dx.types.ResRet", ParameterKind::RESOURCE_RET)
+ .Default(ParameterKind::INVALID);
+}
+
+} // namespace DXIL
+} // namespace llvm
+
+#endif
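parameterTypeNameToKind is a plain StringSwitch from the textual type names used in DXIL operation records to ParameterKind, defaulting to INVALID. A small usage sketch, assuming the LLVM include directory containing the new header is on the include path:

#include "llvm/Support/DXILOperationCommon.h"
#include <cassert>

using llvm::DXIL::ParameterKind;
using llvm::DXIL::parameterTypeNameToKind;

int main() {
  assert(parameterTypeNameToKind("float") == ParameterKind::FLOAT);
  assert(parameterTypeNameToKind("dx.types.Handle") == ParameterKind::DXIL_HANDLE);
  assert(parameterTypeNameToKind("$o") == ParameterKind::OVERLOAD);
  assert(parameterTypeNameToKind("i7") == ParameterKind::INVALID); // no match
  return 0;
}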
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Error.h b/contrib/llvm-project/llvm/include/llvm/Support/Error.h
index 3c2c2c8b8ceb..f2d3388a328f 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Error.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Error.h
@@ -1141,7 +1141,7 @@ private:
class ECError : public ErrorInfo<ECError> {
friend Error errorCodeToError(std::error_code);
- virtual void anchor() override;
+ void anchor() override;
public:
void setErrorCode(std::error_code EC) { this->EC = EC; }
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
index 8079aa436933..94ecae9fc995 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/MathExtras.h
@@ -747,6 +747,12 @@ inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
return (Value + Align - 1) / Align * Align;
}
+inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) {
+ assert(Align != 0 && (Align & (Align - 1)) == 0 &&
+ "Align must be a power of 2");
+ return (Value + Align - 1) & -Align;
+}
+
/// If non-zero \p Skew is specified, the return value will be a minimal integer
/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
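alignToPowerOf2 is the usual mask trick: when Align is a power of two, (Value + Align - 1) & -Align rounds Value up without the divide in the generic alignTo. A quick standalone check of the equivalence under that precondition:

#include <cassert>
#include <cstdint>

static uint64_t alignToDiv(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;  // generic form
}
static uint64_t alignToMask(uint64_t Value, uint64_t Align) {
  assert(Align != 0 && (Align & (Align - 1)) == 0 && "power of two required");
  return (Value + Align - 1) & -Align;         // power-of-two fast path
}

int main() {
  for (uint64_t Align = 1; Align <= 64; Align *= 2)
    for (uint64_t Value = 0; Value < 200; ++Value)
      assert(alignToDiv(Value, Align) == alignToMask(Value, Align));
  return 0;
}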
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
index 58adb41cb0ef..f025cde4a16b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/raw_ostream.h
@@ -710,7 +710,7 @@ class buffer_ostream : public raw_svector_ostream {
raw_ostream &OS;
SmallVector<char, 0> Buffer;
- virtual void anchor() override;
+ void anchor() override;
public:
buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
@@ -721,7 +721,7 @@ class buffer_unique_ostream : public raw_svector_ostream {
std::unique_ptr<raw_ostream> OS;
SmallVector<char, 0> Buffer;
- virtual void anchor() override;
+ void anchor() override;
public:
buffer_unique_ostream(std::unique_ptr<raw_ostream> OS)
diff --git a/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index ef4fc85b245d..5b8b852962f4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/contrib/llvm-project/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -141,6 +141,8 @@ def : GINodeEquiv<G_FMINNUM, fminnum>;
def : GINodeEquiv<G_FMAXNUM, fmaxnum>;
def : GINodeEquiv<G_FMINNUM_IEEE, fminnum_ieee>;
def : GINodeEquiv<G_FMAXNUM_IEEE, fmaxnum_ieee>;
+def : GINodeEquiv<G_FMAXIMUM, fmaximum>;
+def : GINodeEquiv<G_FMINIMUM, fminimum>;
def : GINodeEquiv<G_READCYCLECOUNTER, readcyclecounter>;
def : GINodeEquiv<G_ROTR, rotr>;
def : GINodeEquiv<G_ROTL, rotl>;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO.h
index 0b0f30be4dc9..52f0bc9346bf 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO.h
@@ -21,8 +21,6 @@
namespace llvm {
struct InlineParams;
-class StringRef;
-class ModuleSummaryIndex;
class ModulePass;
class Pass;
class BasicBlock;
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
index 8466f5612d99..794e85f877be 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -2837,7 +2837,7 @@ struct IRAttribute : public BaseType {
IRAttribute(const IRPosition &IRP) : BaseType(IRP) {}
/// See AbstractAttribute::initialize(...).
- virtual void initialize(Attributor &A) override {
+ void initialize(Attributor &A) override {
const IRPosition &IRP = this->getIRPosition();
if (isa<UndefValue>(IRP.getAssociatedValue()) ||
this->hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ false,
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/WholeProgramDevirt.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/WholeProgramDevirt.h
index 47c137e70a7f..a2296a064213 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/WholeProgramDevirt.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/WholeProgramDevirt.h
@@ -239,7 +239,9 @@ struct VTableSlotSummary {
StringRef TypeID;
uint64_t ByteOffset;
};
-
+bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO);
+void updatePublicTypeTestCalls(Module &M,
+ bool WholeProgramVisibilityEnabledInLTO);
void updateVCallVisibilityInModule(
Module &M, bool WholeProgramVisibilityEnabledInLTO,
const DenseSet<GlobalValue::GUID> &DynamicExportSymbols);
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MatrixUtils.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MatrixUtils.h
index 39a0d4bf40cc..ffad57002935 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MatrixUtils.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MatrixUtils.h
@@ -25,9 +25,9 @@ class IRBuilderBase;
/// A helper struct to create IR loop nests for tiling in IR of the following
/// form:
-/// for CurrentColumn = 0..NumColumns
-/// for CurrentRow = 0..NumRows
-/// for CurrentInner = 0..NumInner
+/// for ColumnLoop.Index = 0..NumColumns
+/// for RowLoop.Index = 0..NumRows
+/// for KLoop.Index = 0..NumInner
struct TileInfo {
/// Number of rows of the matrix.
unsigned NumRows;
@@ -42,26 +42,21 @@ struct TileInfo {
/// Number of rows/columns in a tile.
unsigned TileSize = -1;
- /// Start row of the current tile to compute.
- Value *CurrentRow;
-
- /// Start column of the current tile to compute.
- Value *CurrentCol;
-
- /// Current tile offset during the tile computation.
- Value *CurrentK;
-
- /// Header of the outermost loop iterating from 0..NumColumns.
- BasicBlock *ColumnLoopHeader = nullptr;
-
- /// Header of the second loop iterating from 0..NumRows.
- BasicBlock *RowLoopHeader = nullptr;
- /// Latch of the second loop iterating from 0..NumRows.
- BasicBlock *RowLoopLatch = nullptr;
- /// Header of the innermost loop iterating from 0..NumInner.
- BasicBlock *InnerLoopHeader = nullptr;
- /// Latch of the innermost loop iterating from 0..NumInner.
- BasicBlock *InnerLoopLatch = nullptr;
+ /// Properties of a single loop used when generating the tiled loop nest.
+ struct MatrixLoop {
+ /// The index updated on every iteration.
+ Value *Index = nullptr;
+ /// The header and latch of the loop.
+ BasicBlock *Header = nullptr;
+ BasicBlock *Latch = nullptr;
+ };
+
+ /// The loop iterating on the rows.
+ MatrixLoop RowLoop;
+ /// The loop iterating on the columns.
+ MatrixLoop ColumnLoop;
+ /// The loop iterating on k (inner dimension).
+ MatrixLoop KLoop;
TileInfo(unsigned NumRows, unsigned NumColumns, unsigned NumInner,
unsigned TileSize)
@@ -72,9 +67,9 @@ struct TileInfo {
/// for the inner loop body and sets {Column,Row,Inner}LoopHeader/Latch
/// fields.
///
- /// for CurrentColumn = 0..NumColumns
- /// for CurrentRow = 0..NumRows
- /// for CurrentInner = 0..NumInner
+ /// for ColumnLoop.Index = 0..NumColumns
+ /// for RowLoop.Index = 0..NumRows
+ /// for InnerLoop.Index = 0..NumInner
BasicBlock *CreateTiledLoops(BasicBlock *Start, BasicBlock *End,
IRBuilderBase &B, DomTreeUpdater &DTU,
LoopInfo &LI);
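The TileInfo rework above replaces three parallel sets of members (CurrentRow/CurrentCol/CurrentK plus per-loop header and latch blocks) with one MatrixLoop record per generated loop, so each loop's state travels together. A stripped-down sketch of the same grouping, with forward declarations standing in for llvm::Value and llvm::BasicBlock; TileInfoSketch is an illustrative name, not the LLVM type:

// Stand-ins for the LLVM IR types used only through pointers here.
struct Value;
struct BasicBlock;

// One record per generated loop instead of parallel Current*/Header/Latch
// fields; adding another loop later means adding one member, not three.
struct MatrixLoop {
  Value *Index = nullptr;       // induction variable updated every iteration
  BasicBlock *Header = nullptr; // loop header block
  BasicBlock *Latch = nullptr;  // loop latch block
};

struct TileInfoSketch {
  unsigned NumRows = 0, NumColumns = 0, NumInner = 0, TileSize = 0;
  MatrixLoop RowLoop;    // iterates 0..NumRows
  MatrixLoop ColumnLoop; // iterates 0..NumColumns
  MatrixLoop KLoop;      // iterates 0..NumInner (inner dimension)
};

int main() {
  TileInfoSketch TI;
  (void)TI;
  return 0;
}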
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
index 8dc0f1e26a92..56cc15658675 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/MemoryOpRemark.h
@@ -102,9 +102,9 @@ struct AutoInitRemark : public MemoryOpRemark {
static bool canHandle(const Instruction *I);
protected:
- virtual std::string explainSource(StringRef Type) const override;
- virtual StringRef remarkName(RemarkKind RK) const override;
- virtual DiagnosticKind diagnosticKind() const override {
+ std::string explainSource(StringRef Type) const override;
+ StringRef remarkName(RemarkKind RK) const override;
+ DiagnosticKind diagnosticKind() const override {
return DK_OptimizationRemarkMissed;
}
};
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 79a44b667445..1b2482a2363d 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -208,7 +208,7 @@ private:
Value *optimizeIsAscii(CallInst *CI, IRBuilderBase &B);
Value *optimizeToAscii(CallInst *CI, IRBuilderBase &B);
Value *optimizeAtoi(CallInst *CI, IRBuilderBase &B);
- Value *optimizeStrtol(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrToInt(CallInst *CI, IRBuilderBase &B, bool AsSigned);
// Formatting and IO Library Call Optimizations
Value *optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
diff --git a/contrib/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp b/contrib/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
index 6d9084215dee..ded842b92ae1 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
@@ -133,7 +133,8 @@ void CodeMetrics::analyzeBasicBlock(
// When preparing for LTO, liberally consider calls as inline
// candidates.
if (!Call->isNoInline() && IsLoweredToCall &&
- ((F->hasInternalLinkage() && F->hasOneUse()) || PrepareForLTO)) {
+ ((F->hasInternalLinkage() && F->hasOneLiveUse()) ||
+ PrepareForLTO)) {
++NumInlineCandidates;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
index 9f8a5e472f01..8192ed56caf0 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InlineCost.cpp
@@ -185,8 +185,8 @@ private:
public:
InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
- virtual void emitInstructionAnnot(const Instruction *I,
- formatted_raw_ostream &OS) override;
+ void emitInstructionAnnot(const Instruction *I,
+ formatted_raw_ostream &OS) override;
};
/// Carry out call site analysis, in order to evaluate inlinability.
diff --git a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
index 4691aebbdfe1..21fe448218bc 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1591,12 +1591,6 @@ static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
!match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
return nullptr;
- // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
- // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
- // can eliminate Op1 from this 'and'.
- if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
- return Op0;
-
// Check for any combination of predicates that are guaranteed to be disjoint.
if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
(Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
@@ -1616,12 +1610,6 @@ static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
!match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
return nullptr;
- // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
- // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
- // can eliminate Op0 from this 'or'.
- if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
- return Op1;
-
// Check for any combination of predicates that cover the entire range of
// possibilities.
if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index bed684b7652a..aa35f253bc5f 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1500,9 +1500,7 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
Value *Ptr0 = VL[0];
using DistOrdPair = std::pair<int64_t, int>;
- auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
- return L.first < R.first;
- };
+ auto Compare = llvm::less_first();
std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
Offsets.emplace(0, 0);
int Cnt = 1;
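The sortPtrAccesses hunk above (and the emitTransfers hunk in InstrRefBasedImpl.cpp further down) replaces a hand-written pair comparator with llvm::less_first, LLVM's ready-made compare-by-first-member functor. Outside of LLVM the same helper is a two-line functor; the sketch below is a plain-C++ rendering of that idea, not LLVM's own definition:

#include <cstdint>
#include <iostream>
#include <set>
#include <utility>

// Orders any two pair-like values by their .first member only, mirroring
// what llvm::less_first provides.
struct LessFirst {
  template <typename T, typename U>
  bool operator()(const T &L, const U &R) const {
    return L.first < R.first;
  }
};

int main() {
  using DistOrdPair = std::pair<int64_t, int>;
  std::set<DistOrdPair, LessFirst> Offsets; // ordered by distance only
  Offsets.emplace(0, 0);
  Offsets.emplace(16, 2);
  Offsets.emplace(8, 1);
  for (const auto &P : Offsets)
    std::cout << P.first << " -> " << P.second << "\n"; // 0, 8, 16
  return 0;
}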
diff --git a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
index 31e4380e4379..413ec6dd4b42 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -115,9 +115,7 @@ struct AllocFnsTy {
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
- {LibFunc_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
{LibFunc_vec_malloc, {MallocLike, 1, 0, -1, -1, MallocFamily::VecMalloc}},
- {LibFunc_valloc, {MallocLike, 1, 0, -1, -1, MallocFamily::Malloc}},
{LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned int)
{LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned int, nothrow)
{LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t)
@@ -142,13 +140,9 @@ static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
{LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned int, nothrow)
{LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long)
{LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long, nothrow)
- {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
{LibFunc_memalign, {AlignedAllocLike, 2, 1, -1, 0, MallocFamily::Malloc}},
- {LibFunc_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::Malloc}},
{LibFunc_vec_calloc, {CallocLike, 2, 0, 1, -1, MallocFamily::VecMalloc}},
- {LibFunc_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
{LibFunc_vec_realloc, {ReallocLike, 2, 1, -1, -1, MallocFamily::VecMalloc}},
- {LibFunc_reallocf, {ReallocLike, 2, 1, -1, -1, MallocFamily::Malloc}},
{LibFunc_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
{LibFunc_dunder_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
{LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
@@ -488,7 +482,6 @@ struct FreeFnsTy {
// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
- {LibFunc_free, {1, MallocFamily::Malloc}},
{LibFunc_vec_free, {1, MallocFamily::VecMalloc}},
{LibFunc_ZdlPv, {1, MallocFamily::CPPNew}}, // operator delete(void*)
{LibFunc_ZdaPv, {1, MallocFamily::CPPNewArray}}, // operator delete[](void*)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/contrib/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index c52b27a38fe9..efe60586979a 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -164,7 +164,8 @@ static void addIntrinsicToSummary(
SetVector<FunctionSummary::ConstVCall> &TypeCheckedLoadConstVCalls,
DominatorTree &DT) {
switch (CI->getCalledFunction()->getIntrinsicID()) {
- case Intrinsic::type_test: {
+ case Intrinsic::type_test:
+ case Intrinsic::public_type_test: {
auto *TypeMDVal = cast<MetadataAsValue>(CI->getArgOperand(1));
auto *TypeId = dyn_cast<MDString>(TypeMDVal->getMetadata());
if (!TypeId)
diff --git a/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp b/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
index 7571bd0059cc..5b0fbca23891 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/PHITransAddr.cpp
@@ -21,6 +21,10 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static cl::opt<bool> EnableAddPhiTranslation(
+ "gvn-add-phi-translation", cl::init(false), cl::Hidden,
+ cl::desc("Enable phi-translation of add instructions"));
+
static bool CanPHITrans(Instruction *Inst) {
if (isa<PHINode>(Inst) ||
isa<GetElementPtrInst>(Inst))
@@ -410,14 +414,14 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
return Result;
}
-#if 0
- // FIXME: This code works, but it is unclear that we actually want to insert
- // a big chain of computation in order to make a value available in a block.
- // This needs to be evaluated carefully to consider its cost trade offs.
-
// Handle add with a constant RHS.
- if (Inst->getOpcode() == Instruction::Add &&
+ if (EnableAddPhiTranslation && Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
+
+ // FIXME: This code works, but it is unclear that we actually want to insert
+ // a big chain of computation in order to make a value available in a block.
+ // This needs to be evaluated carefully to consider its cost trade offs.
+
// PHI translate the LHS.
Value *OpVal = InsertPHITranslatedSubExpr(Inst->getOperand(0),
CurBB, PredBB, DT, NewInsts);
@@ -431,7 +435,6 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
NewInsts.push_back(Res);
return Res;
}
-#endif
return nullptr;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
index d46248aa3889..2958a5054afc 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -11153,20 +11153,6 @@ bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
return true;
}
- // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
- auto ProveViaGuard = [&](const BasicBlock *Block) {
- if (isImpliedViaGuard(Block, Pred, LHS, RHS))
- return true;
- if (ProvingStrictComparison) {
- auto ProofFn = [&](ICmpInst::Predicate P) {
- return isImpliedViaGuard(Block, P, LHS, RHS);
- };
- if (SplitAndProve(ProofFn))
- return true;
- }
- return false;
- };
-
// Try to prove (Pred, LHS, RHS) using isImpliedCond.
auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
const Instruction *CtxI = &BB->front();
@@ -11193,9 +11179,6 @@ bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
PredBB = BB->getSinglePredecessor();
for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
- if (ProveViaGuard(Pair.first))
- return true;
-
const BranchInst *BlockEntryPredicate =
dyn_cast<BranchInst>(Pair.first->getTerminator());
if (!BlockEntryPredicate || BlockEntryPredicate->isUnconditional())
@@ -11218,6 +11201,15 @@ bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
return true;
}
+ // Check conditions due to any @llvm.experimental.guard intrinsics.
+ auto *GuardDecl = F.getParent()->getFunction(
+ Intrinsic::getName(Intrinsic::experimental_guard));
+ if (GuardDecl)
+ for (const auto *GU : GuardDecl->users())
+ if (const auto *Guard = dyn_cast<IntrinsicInst>(GU))
+ if (Guard->getFunction() == BB->getParent() && DT.dominates(Guard, BB))
+ if (ProveViaCond(Guard->getArgOperand(0), false))
+ return true;
return false;
}
diff --git a/contrib/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp b/contrib/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
index 201e64770766..e128187bac49 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
@@ -75,7 +75,9 @@ void llvm::findDevirtualizableCallsForTypeTest(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI,
DominatorTree &DT) {
- assert(CI->getCalledFunction()->getIntrinsicID() == Intrinsic::type_test);
+ assert(CI->getCalledFunction()->getIntrinsicID() == Intrinsic::type_test ||
+ CI->getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::public_type_test);
const Module *M = CI->getParent()->getParent()->getParent();
diff --git a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
index 1f3798d1338e..2dd671b4ab9e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
@@ -4266,9 +4266,10 @@ bool llvm::getConstantDataArrayInfo(const Value *V,
return true;
}
-/// This function computes the length of a null-terminated C string pointed to
-/// by V. If successful, it returns true and returns the string in Str.
-/// If unsuccessful, it returns false.
+/// Extract bytes from the initializer of the constant array V, which need
+/// not be a nul-terminated string. On success, store the bytes in Str and
+/// return true. When TrimAtNul is set, Str will contain only the bytes up
+/// to but not including the first nul. Return false on failure.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
uint64_t Offset, bool TrimAtNul) {
ConstantDataArraySlice Slice;
@@ -6543,7 +6544,6 @@ bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
const Value *RHS, const DataLayout &DL,
unsigned Depth) {
- assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
return true;
@@ -6656,14 +6656,12 @@ static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
-static Optional<bool>
-isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
- const ConstantInt *C1,
- CmpInst::Predicate BPred,
- const ConstantInt *C2) {
- ConstantRange DomCR =
- ConstantRange::makeExactICmpRegion(APred, C1->getValue());
- ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
+static Optional<bool> isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
+ const APInt &C1,
+ CmpInst::Predicate BPred,
+ const APInt &C2) {
+ ConstantRange DomCR = ConstantRange::makeExactICmpRegion(APred, C1);
+ ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2);
ConstantRange Intersection = DomCR.intersectWith(CR);
ConstantRange Difference = DomCR.difference(CR);
if (Intersection.isEmptySet())
@@ -6701,14 +6699,9 @@ static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
// Can we infer anything when the LHS operands match and the RHS operands are
// constants (not necessarily matching)?
- if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
- if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
- APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
- return Implication;
- // No amount of additional analysis will infer the second condition, so
- // early exit.
- return None;
- }
+ const APInt *AC, *BC;
+ if (ALHS == BLHS && match(ARHS, m_APInt(AC)) && match(BRHS, m_APInt(BC)))
+ return isImpliedCondMatchingImmOperands(APred, *AC, BPred, *BC);
if (APred == BPred)
return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
@@ -6761,14 +6754,8 @@ llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
return None;
- Type *OpTy = LHS->getType();
- assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
-
- // FIXME: Extending the code below to handle vectors.
- if (OpTy->isVectorTy())
- return None;
-
- assert(OpTy->isIntegerTy(1) && "implied by above");
+ assert(LHS->getType()->isIntOrIntVectorTy(1) &&
+ "Expected integer type only!");
// Both LHS and RHS are icmps.
const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
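The ValueTracking change rewrites isImpliedCondMatchingImmOperands in terms of ConstantRange: the set of values satisfying "icmp APred X, C1" either lies entirely inside the set for "icmp BPred X, C2" (implied true), is disjoint from it (implied false), or neither. That reasoning can be sanity-checked without the LLVM APIs by exhaustively enumerating a small bit-width; the sketch below does so for unsigned 8-bit values, and the names (impliedByExhaustion, the lambdas) are illustrative only:

#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>

// Does "APred(X, C1)" imply "BPred(X, C2)" for every 8-bit unsigned X?
// Returns true / false / nullopt (no implication either way), matching the
// Optional<bool> contract of the patched helper.
std::optional<bool> impliedByExhaustion(
    const std::function<bool(uint8_t, uint8_t)> &APred, uint8_t C1,
    const std::function<bool(uint8_t, uint8_t)> &BPred, uint8_t C2) {
  bool AllTrue = true, AllFalse = true;
  for (unsigned X = 0; X <= 255; ++X) {
    if (!APred(static_cast<uint8_t>(X), C1))
      continue; // only values satisfying the dominating condition matter
    if (BPred(static_cast<uint8_t>(X), C2))
      AllFalse = false;
    else
      AllTrue = false;
  }
  if (AllTrue)
    return true;  // dominating region is a subset: implied true
  if (AllFalse)
    return false; // regions are disjoint: implied false
  return std::nullopt;
}

int main() {
  auto ult = [](uint8_t A, uint8_t B) { return A < B; };
  auto ugt = [](uint8_t A, uint8_t B) { return A > B; };
  // X <u 10 implies X <u 20.
  std::cout << impliedByExhaustion(ult, 10, ult, 20).value() << "\n"; // 1
  // X <u 10 implies that X >u 50 is false.
  std::cout << impliedByExhaustion(ult, 10, ugt, 50).value() << "\n"; // 0
  return 0;
}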
diff --git a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 1d6c21bd66d1..1943b5db94c3 100644
--- a/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm-project/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -7788,7 +7788,7 @@ static Expected<bool> getEnableSplitLTOUnitFlag(BitstreamCursor &Stream,
case bitc::FS_FLAGS: { // [flags]
uint64_t Flags = Record[0];
// Scan flags.
- assert(Flags <= 0x7f && "Unexpected bits in flag");
+ assert(Flags <= 0xff && "Unexpected bits in flag");
return Flags & 0x8;
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index e0050a47a6f6..32a10ad41d1f 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2795,12 +2795,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) {
DL.getTypeAllocSize(Op->getType()).getFixedSize())
return OpExpr;
- // Otherwise the pointer is smaller than the resultant integer, mask off
- // the high bits so we are sure to get a proper truncation if the input is
- // a constant expr.
- unsigned InBits = DL.getTypeAllocSizeInBits(Op->getType());
- const MCExpr *MaskExpr = MCConstantExpr::create(~0ULL >> (64-InBits), Ctx);
- return MCBinaryExpr::createAnd(OpExpr, MaskExpr, Ctx);
+ break; // Error
}
case Instruction::Sub: {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WasmException.h b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WasmException.h
index 2abbe37cb6d9..419b569d123c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WasmException.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/WasmException.h
@@ -28,7 +28,7 @@ public:
void endModule() override;
void beginFunction(const MachineFunction *MF) override {}
- virtual void markFunctionEnd() override;
+ void markFunctionEnd() override;
void endFunction(const MachineFunction *MF) override;
protected:
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AtomicExpandPass.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AtomicExpandPass.cpp
index f21c1bf4e914..ad51bab8f30b 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -515,9 +515,14 @@ void AtomicExpand::expandAtomicStore(StoreInst *SI) {
// It is the responsibility of the target to only signal expansion via
// shouldExpandAtomicRMW in cases where this is required and possible.
IRBuilder<> Builder(SI);
+ AtomicOrdering Ordering = SI->getOrdering();
+ assert(Ordering != AtomicOrdering::NotAtomic);
+ AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
+ ? AtomicOrdering::Monotonic
+ : Ordering;
AtomicRMWInst *AI = Builder.CreateAtomicRMW(
AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
- SI->getAlign(), SI->getOrdering());
+ SI->getAlign(), RMWOrdering);
SI->eraseFromParent();
// Now we have an appropriate swap instruction, lower it as usual.
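In the AtomicExpandPass hunk, a store being rewritten as an atomicrmw xchg cannot keep an 'unordered' ordering (atomicrmw orderings start at monotonic), so the patch promotes unordered to monotonic and preserves anything stronger. A tiny standalone sketch of just that mapping; the enum is a local stand-in, not llvm::AtomicOrdering:

#include <cassert>
#include <iostream>

// Only the orderings relevant to the store -> xchg rewrite.
enum class Ordering { NotAtomic, Unordered, Monotonic, Release, SeqCst };

// Ordering for the replacement atomicrmw xchg: 'unordered' is promoted to
// 'monotonic'; stronger orderings pass through unchanged.
Ordering xchgOrderingFor(Ordering StoreOrdering) {
  assert(StoreOrdering != Ordering::NotAtomic && "only atomic stores expand");
  return StoreOrdering == Ordering::Unordered ? Ordering::Monotonic
                                              : StoreOrdering;
}

int main() {
  std::cout << (xchgOrderingFor(Ordering::Unordered) == Ordering::Monotonic)
            << "\n"; // 1
  std::cout << (xchgOrderingFor(Ordering::SeqCst) == Ordering::SeqCst)
            << "\n"; // 1
  return 0;
}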
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
index b6c762b93ca5..b8f6fc9bbcde 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2568,8 +2568,6 @@ struct ExtAddrMode : public TargetLowering::AddrMode {
}
};
-} // end anonymous namespace
-
#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
AM.print(OS);
@@ -2617,6 +2615,8 @@ LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
}
#endif
+} // end anonymous namespace
+
namespace {
/// This class provides transaction based operation on the IR.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index da054b9c14fb..05a25bc3078e 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1142,7 +1142,8 @@ bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
if (MI.getParent() == UseMI.getParent() &&
((IsDiv && UseMI.getOpcode() == RemOpcode) ||
(!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
- matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2))) {
+ matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
+ matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
OtherMI = &UseMI;
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index dbdcfe0b6f0b..2f9187bbf2ad 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -151,11 +151,11 @@ public:
LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
<< " was copied to " << MI);
#endif
- // We allow insts in the entry block to have a debug loc line of 0 because
+ // We allow insts in the entry block to have no debug loc because
// they could have originated from constants, and we don't want a jumpy
// debug experience.
assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
- MI.getDebugLoc().getLine() == 0) &&
+ (MI.getParent()->isEntryBlock() && !MI.getDebugLoc())) &&
"Line info was not transferred to all instructions");
}
};
@@ -3020,11 +3020,9 @@ bool IRTranslator::translate(const Instruction &Inst) {
bool IRTranslator::translate(const Constant &C, Register Reg) {
// We only emit constants into the entry block from here. To prevent jumpy
- // debug behaviour set the line to 0.
+ // debug behaviour remove debug line.
if (auto CurrInstDL = CurBuilder->getDL())
- EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
- CurrInstDL.getScope(),
- CurrInstDL.getInlinedAt()));
+ EntryBuilder->setDebugLoc(DebugLoc());
if (auto CI = dyn_cast<ConstantInt>(&C))
EntryBuilder->buildConstant(Reg, *CI);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
index ef49d3888f2b..191596dbf53e 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -1330,7 +1330,7 @@ bool InstrRefBasedLDV::transferDebugPHI(MachineInstr &MI) {
const MachineOperand &MO = MI.getOperand(0);
unsigned InstrNum = MI.getOperand(1).getImm();
- auto EmitBadPHI = [this, &MI, InstrNum](void) -> bool {
+ auto EmitBadPHI = [this, &MI, InstrNum]() -> bool {
// Helper lambda to do any accounting when we fail to find a location for
// a DBG_PHI. This can happen if DBG_PHIs are malformed, or refer to a
// dead stack slot, for example.
@@ -3136,8 +3136,7 @@ bool InstrRefBasedLDV::emitTransfers(
MI->getDebugLoc()->getInlinedAt());
Insts.emplace_back(AllVarsNumbering.find(Var)->second, MI);
}
- llvm::sort(Insts,
- [](const auto &A, const auto &B) { return A.first < B.first; });
+ llvm::sort(Insts, llvm::less_first());
// Insert either before or after the designated point...
if (P.MBB) {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/LiveRangeEdit.cpp b/contrib/llvm-project/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 2aafb746aa2c..abf36b3f4c67 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -300,13 +300,15 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
SmallVector<unsigned, 8> RegsToErase;
bool ReadsPhysRegs = false;
bool isOrigDef = false;
- unsigned Dest;
+ Register Dest;
+ unsigned DestSubReg;
// Only optimize rematerialize case when the instruction has one def, since
// otherwise we could leave some dead defs in the code. This case is
// extremely rare.
if (VRM && MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
MI->getDesc().getNumDefs() == 1) {
Dest = MI->getOperand(0).getReg();
+ DestSubReg = MI->getOperand(0).getSubReg();
unsigned Original = VRM->getOriginal(Dest);
LiveInterval &OrigLI = LIS.getInterval(Original);
VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
@@ -384,8 +386,18 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
if (isOrigDef && DeadRemats && !HasLiveVRegUses &&
TII.isTriviallyReMaterializable(*MI)) {
LiveInterval &NewLI = createEmptyIntervalFrom(Dest, false);
- VNInfo *VNI = NewLI.getNextValue(Idx, LIS.getVNInfoAllocator());
+ VNInfo::Allocator &Alloc = LIS.getVNInfoAllocator();
+ VNInfo *VNI = NewLI.getNextValue(Idx, Alloc);
NewLI.addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(), VNI));
+
+ if (DestSubReg) {
+ const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
+ auto *SR = NewLI.createSubRange(
+ Alloc, TRI->getSubRegIndexLaneMask(DestSubReg));
+ SR->addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(),
+ SR->getNextValue(Idx, Alloc)));
+ }
+
pop_back();
DeadRemats->insert(MI);
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/MachineFunctionPass.cpp b/contrib/llvm-project/llvm/lib/CodeGen/MachineFunctionPass.cpp
index 99494122d608..477310f59112 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/MachineFunctionPass.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/MachineFunctionPass.cpp
@@ -26,6 +26,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/PrintPasses.h"
using namespace llvm;
using namespace ore;
@@ -70,6 +71,17 @@ bool MachineFunctionPass::runOnFunction(Function &F) {
if (ShouldEmitSizeRemarks)
CountBefore = MF.getInstructionCount();
+ // For --print-changed, if the function name is a candidate, save the
+ // serialized MF to be compared later.
+ // TODO Implement --filter-passes.
+ SmallString<0> BeforeStr, AfterStr;
+ bool ShouldPrintChanged = PrintChanged != ChangePrinter::None &&
+ isFunctionInPrintList(MF.getName());
+ if (ShouldPrintChanged) {
+ raw_svector_ostream OS(BeforeStr);
+ MF.print(OS);
+ }
+
bool RV = runOnMachineFunction(MF);
if (ShouldEmitSizeRemarks) {
@@ -97,6 +109,23 @@ bool MachineFunctionPass::runOnFunction(Function &F) {
MFProps.set(SetProperties);
MFProps.reset(ClearedProperties);
+
+ // For --print-changed, print if the serialized MF has changed. Modes other
+ // than quiet/verbose are unimplemented and treated the same as 'quiet'.
+ if (ShouldPrintChanged) {
+ raw_svector_ostream OS(AfterStr);
+ MF.print(OS);
+ if (BeforeStr != AfterStr) {
+ StringRef Arg;
+ if (const PassInfo *PI = Pass::lookupPassInfo(getPassID()))
+ Arg = PI->getPassArgument();
+ errs() << ("*** IR Dump After " + getPassName() + " (" + Arg + ") on " +
+ MF.getName() + " ***\n" + AfterStr);
+ } else if (PrintChanged == ChangePrinter::Verbose) {
+ errs() << ("*** IR Dump After " + getPassName() + " on " + MF.getName() +
+ " omitted because no change ***\n");
+ }
+ }
return RV;
}
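The MachineFunctionPass change implements --print-changed for MIR by serializing the machine function to a string before the pass runs, serializing it again afterwards, and printing the "after" dump only when the two strings differ (or a "no change" note in verbose mode). Detached from LLVM, the same before/after-snapshot pattern looks roughly like the sketch below; runWithChangeReport and its parameters are illustrative names, not LLVM APIs:

#include <functional>
#include <iostream>
#include <string>

// Run a transform over some state and report a textual dump only if the
// serialized form actually changed -- the shape of --print-changed.
template <typename State>
bool runWithChangeReport(State &S, const std::string &PassName,
                         const std::function<bool(State &)> &Transform,
                         const std::function<std::string(const State &)> &Dump,
                         bool Verbose = false) {
  const std::string Before = Dump(S);
  bool Changed = Transform(S);
  const std::string After = Dump(S);
  if (Before != After)
    std::cout << "*** Dump after " << PassName << " ***\n" << After;
  else if (Verbose)
    std::cout << "*** Dump after " << PassName
              << " omitted because no change ***\n";
  return Changed;
}

int main() {
  int Value = 3;
  auto Dump = [](const int &V) { return std::to_string(V) + "\n"; };
  runWithChangeReport<int>(Value, "double-it",
                           [](int &V) { V *= 2; return true; }, Dump);
  runWithChangeReport<int>(Value, "no-op",
                           [](int &) { return false; }, Dump, /*Verbose=*/true);
  return 0;
}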
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 1115c2a27956..87e2f9f20021 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -18,6 +18,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
@@ -69,6 +70,8 @@ static CallInst::TailCallKind getOverridingTailCallKind(const Function &F) {
static bool lowerObjCCall(Function &F, const char *NewFn,
bool setNonLazyBind = false) {
+ assert(IntrinsicInst::mayLowerToFunctionCall(F.getIntrinsicID()) &&
+ "Pre-ISel intrinsics do lower into regular function calls");
if (F.use_empty())
return false;
@@ -107,7 +110,9 @@ static bool lowerObjCCall(Function &F, const char *NewFn,
IRBuilder<> Builder(CI->getParent(), CI->getIterator());
SmallVector<Value *, 8> Args(CI->args());
- CallInst *NewCI = Builder.CreateCall(FCache, Args);
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+ CI->getOperandBundlesAsDefs(BundleList);
+ CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
NewCI->setName(CI->getName());
// Try to set the most appropriate TailCallKind based on both the current
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index 7327f9e52efc..54bb4a31ef49 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -47,7 +47,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
- virtual MachineFunctionProperties getRequiredProperties() const override {
+ MachineFunctionProperties getRequiredProperties() const override {
return MachineFunctionProperties().set(
MachineFunctionProperties::Property::IsSSA);
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
index 4a54d7ebf8a9..9c6cb7c3a4e2 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -135,6 +135,12 @@ static cl::opt<bool> GreedyRegClassPriorityTrumpsGlobalness(
"more important then whether the range is global"),
cl::Hidden);
+static cl::opt<bool> GreedyReverseLocalAssignment(
+ "greedy-reverse-local-assignment",
+ cl::desc("Reverse allocation order of local live ranges, such that "
+ "shorter local live ranges will tend to be allocated first"),
+ cl::Hidden);
+
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
createGreedyRegisterAllocator);
@@ -297,11 +303,10 @@ void RAGreedy::enqueue(PQueue &CurQueue, const LiveInterval *LI) {
} else {
// Giant live ranges fall back to the global assignment heuristic, which
// prevents excessive spilling in pathological cases.
- bool ReverseLocal = TRI->reverseLocalAssignment();
const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
- bool ForceGlobal =
- !ReverseLocal && (Size / SlotIndex::InstrDist) >
- (2 * RegClassInfo.getNumAllocatableRegs(&RC));
+ bool ForceGlobal = !ReverseLocalAssignment &&
+ (Size / SlotIndex::InstrDist) >
+ (2 * RegClassInfo.getNumAllocatableRegs(&RC));
unsigned GlobalBit = 0;
if (Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
@@ -309,7 +314,7 @@ void RAGreedy::enqueue(PQueue &CurQueue, const LiveInterval *LI) {
// Allocate original local ranges in linear instruction order. Since they
// are singly defined, this produces optimal coloring in the absence of
// global interference and other constraints.
- if (!ReverseLocal)
+ if (!ReverseLocalAssignment)
Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
else {
// Allocating bottom up may allow many short LRGs to be assigned first
@@ -2528,6 +2533,10 @@ bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
? GreedyRegClassPriorityTrumpsGlobalness
: TRI->regClassPriorityTrumpsGlobalness(*MF);
+ ReverseLocalAssignment = GreedyReverseLocalAssignment.getNumOccurrences()
+ ? GreedyReverseLocalAssignment
+ : TRI->reverseLocalAssignment();
+
ExtraInfo.emplace();
EvictAdvisor =
getAnalysis<RegAllocEvictionAdvisorAnalysis>().getAdvisor(*MF, *this);
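RAGreedy now takes the local-assignment direction from the new cl::opt when the flag was explicitly given (getNumOccurrences()) and falls back to the target hook otherwise, the same scheme already used for the class-priority flag above it. Stripped of cl::opt, the pattern is simply "explicit user setting wins, otherwise target default"; a minimal sketch with std::optional standing in for the parsed flag (all names here are illustrative):

#include <iostream>
#include <optional>

// User-supplied command-line value, if any (stand-in for a cl::opt whose
// getNumOccurrences() tells us whether it was explicitly set).
std::optional<bool> ReverseLocalAssignmentFlag;

// Target hook default (stand-in for TRI->reverseLocalAssignment()).
bool targetReverseLocalAssignment() { return false; }

bool resolveReverseLocalAssignment() {
  return ReverseLocalAssignmentFlag.has_value()
             ? *ReverseLocalAssignmentFlag
             : targetReverseLocalAssignment();
}

int main() {
  std::cout << resolveReverseLocalAssignment() << "\n"; // 0: target default
  ReverseLocalAssignmentFlag = true;                    // as if passed on CLI
  std::cout << resolveReverseLocalAssignment() << "\n"; // 1: flag wins
  return 0;
}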
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h
index 316b12d0213b..483f59ed8e8e 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocGreedy.h
@@ -270,6 +270,8 @@ private:
/// machine function.
bool RegClassPriorityTrumpsGlobalness;
+ bool ReverseLocalAssignment;
+
public:
RAGreedy(const RegClassFilterFunc F = allocateAllRegClasses);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index edb0756e8c3b..654879115ff9 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4877,9 +4877,16 @@ SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
return Res;
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
+ // canonicalize constant to RHS (vector doesn't have to splat)
+ if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
+ !DAG.isConstantIntBuildVectorOrConstantInt(N1))
+ return DAG.getNode(ISD::SMUL_LOHI, DL, N->getVTList(), N1, N0);
+
// If the type is twice as wide is legal, transform the mulhu to a wider
// multiply plus a shift.
if (VT.isSimple() && !VT.isVector()) {
@@ -4887,8 +4894,8 @@ SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
- SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
- SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
+ SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
+ SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
@@ -4908,19 +4915,26 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
return Res;
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
+ // canonicalize constant to RHS (vector doesn't have to splat)
+ if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
+ !DAG.isConstantIntBuildVectorOrConstantInt(N1))
+ return DAG.getNode(ISD::UMUL_LOHI, DL, N->getVTList(), N1, N0);
+
// (umul_lohi N0, 0) -> (0, 0)
- if (isNullConstant(N->getOperand(1))) {
+ if (isNullConstant(N1)) {
SDValue Zero = DAG.getConstant(0, DL, VT);
return CombineTo(N, Zero, Zero);
}
// (umul_lohi N0, 1) -> (N0, 0)
- if (isOneConstant(N->getOperand(1))) {
+ if (isOneConstant(N1)) {
SDValue Zero = DAG.getConstant(0, DL, VT);
- return CombineTo(N, N->getOperand(0), Zero);
+ return CombineTo(N, N0, Zero);
}
// If the type is twice as wide is legal, transform the mulhu to a wider
@@ -4930,8 +4944,8 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
unsigned SimpleSize = Simple.getSizeInBits();
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
- SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
- SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
+ SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
+ SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
@@ -7247,6 +7261,7 @@ static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
// Otherwise if matching a general funnel shift, it should be clear.
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
SelectionDAG &DAG, bool IsRotate) {
+ const auto &TLI = DAG.getTargetLoweringInfo();
// If EltSize is a power of 2 then:
//
// (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1)
@@ -7278,19 +7293,20 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
// always invokes undefined behavior for 32-bit X.
//
// Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise.
+ // This allows us to peek through any operations that only affect Mask's
+ // un-demanded bits.
//
- // NOTE: We can only do this when matching an AND and not a general
- // funnel shift.
+ // NOTE: We can only do this when matching operations which won't modify the
+ // least Log2(EltSize) significant bits and not a general funnel shift.
unsigned MaskLoBits = 0;
- if (IsRotate && Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) {
- if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) {
- KnownBits Known = DAG.computeKnownBits(Neg.getOperand(0));
- unsigned Bits = Log2_64(EltSize);
- if (NegC->getAPIntValue().getActiveBits() <= Bits &&
- ((NegC->getAPIntValue() | Known.Zero).countTrailingOnes() >= Bits)) {
- Neg = Neg.getOperand(0);
- MaskLoBits = Bits;
- }
+ if (IsRotate && isPowerOf2_64(EltSize)) {
+ unsigned Bits = Log2_64(EltSize);
+ APInt DemandedBits =
+ APInt::getLowBitsSet(Neg.getScalarValueSizeInBits(), Bits);
+ if (SDValue Inner =
+ TLI.SimplifyMultipleUseDemandedBits(Neg, DemandedBits, DAG)) {
+ Neg = Inner;
+ MaskLoBits = Bits;
}
}
@@ -7302,15 +7318,15 @@ static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
return false;
SDValue NegOp1 = Neg.getOperand(1);
- // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with
- // Pos'. The truncation is redundant for the purpose of the equality.
- if (MaskLoBits && Pos.getOpcode() == ISD::AND) {
- if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) {
- KnownBits Known = DAG.computeKnownBits(Pos.getOperand(0));
- if (PosC->getAPIntValue().getActiveBits() <= MaskLoBits &&
- ((PosC->getAPIntValue() | Known.Zero).countTrailingOnes() >=
- MaskLoBits))
- Pos = Pos.getOperand(0);
+ // On the RHS of [A], if Pos is the result of operation on Pos' that won't
+ // affect Mask's demanded bits, just replace Pos with Pos'. These operations
+ // are redundant for the purpose of the equality.
+ if (MaskLoBits) {
+ APInt DemandedBits =
+ APInt::getLowBitsSet(Pos.getScalarValueSizeInBits(), MaskLoBits);
+ if (SDValue Inner =
+ TLI.SimplifyMultipleUseDemandedBits(Pos, DemandedBits, DAG)) {
+ Pos = Inner;
}
}
@@ -14988,7 +15004,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
// FMA nodes have flags that propagate to the created nodes.
SelectionDAG::FlagInserter FlagsInserter(DAG, N);
- bool UnsafeFPMath =
+ bool CanReassociate =
Options.UnsafeFPMath || N->getFlags().hasAllowReassociation();
// Constant fold FMA.
@@ -15012,7 +15028,8 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
CostN1 == TargetLowering::NegatibleCost::Cheaper))
return DAG.getNode(ISD::FMA, DL, VT, NegN0, NegN1, N2);
- if (UnsafeFPMath) {
+ // FIXME: use fast math flags instead of Options.UnsafeFPMath
+ if (Options.UnsafeFPMath) {
if (N0CFP && N0CFP->isZero())
return N2;
if (N1CFP && N1CFP->isZero())
@@ -15029,7 +15046,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
!DAG.isConstantFPBuildVectorOrConstantFP(N1))
return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2);
- if (UnsafeFPMath) {
+ if (CanReassociate) {
// (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) &&
DAG.isConstantFPBuildVectorOrConstantFP(N1) &&
@@ -15070,7 +15087,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
}
}
- if (UnsafeFPMath) {
+ if (CanReassociate) {
// (fma x, c, x) -> (fmul x, (c+1))
if (N1CFP && N0 == N2) {
return DAG.getNode(
@@ -19697,8 +19714,11 @@ static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
// extract.
SDValue Op0 = Vec.getOperand(0);
SDValue Op1 = Vec.getOperand(1);
+ APInt SplatVal;
if (isAnyConstantBuildVector(Op0, true) ||
- isAnyConstantBuildVector(Op1, true)) {
+ ISD::isConstantSplatVector(Op0.getNode(), SplatVal) ||
+ isAnyConstantBuildVector(Op1, true) ||
+ ISD::isConstantSplatVector(Op1.getNode(), SplatVal)) {
// extractelt (binop X, C), IndexC --> binop (extractelt X, IndexC), C'
// extractelt (binop C, X), IndexC --> binop C', (extractelt X, IndexC)
SDLoc DL(ExtElt);
@@ -19775,6 +19795,9 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// converts.
}
+ if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
+ return BO;
+
if (VecVT.isScalableVector())
return SDValue();
@@ -19820,9 +19843,6 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
}
}
- if (SDValue BO = scalarizeExtractedBinop(N, DAG, LegalOperations))
- return BO;
-
// Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
// We only perform this optimization before the op legalization phase because
// we may introduce new vector instructions which are not backed by TD
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 441437351852..195c0e6a836f 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2529,8 +2529,7 @@ bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
/// DemandedElts. We use this predicate to simplify operations downstream.
bool SelectionDAG::MaskedVectorIsZero(SDValue V, const APInt &DemandedElts,
unsigned Depth /* = 0 */) const {
- APInt Mask = APInt::getAllOnes(V.getScalarValueSizeInBits());
- return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
+ return computeKnownBits(V, DemandedElts, Depth).isZero();
}
/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
@@ -9089,6 +9088,15 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
}
break;
}
+ case ISD::SMUL_LOHI:
+ case ISD::UMUL_LOHI: {
+ assert(VTList.NumVTs == 2 && Ops.size() == 2 && "Invalid mul lo/hi op!");
+ assert(VTList.VTs[0].isInteger() && VTList.VTs[0] == VTList.VTs[1] &&
+ VTList.VTs[0] == Ops[0].getValueType() &&
+ VTList.VTs[0] == Ops[1].getValueType() &&
+ "Binary operator types must match!");
+ break;
+ }
case ISD::STRICT_FP_EXTEND:
assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
"Invalid STRICT_FP_EXTEND!");
@@ -11682,6 +11690,35 @@ bool BuildVectorSDNode::isConstant() const {
return true;
}
+Optional<std::pair<APInt, APInt>>
+BuildVectorSDNode::isConstantSequence() const {
+ unsigned NumOps = getNumOperands();
+ if (NumOps < 2)
+ return None;
+
+ if (!isa<ConstantSDNode>(getOperand(0)) ||
+ !isa<ConstantSDNode>(getOperand(1)))
+ return None;
+
+ unsigned EltSize = getValueType(0).getScalarSizeInBits();
+ APInt Start = getConstantOperandAPInt(0).trunc(EltSize);
+ APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start;
+
+ if (Stride.isZero())
+ return None;
+
+ for (unsigned i = 2; i < NumOps; ++i) {
+ if (!isa<ConstantSDNode>(getOperand(i)))
+ return None;
+
+ APInt Val = getConstantOperandAPInt(i).trunc(EltSize);
+ if (Val != (Start + (Stride * i)))
+ return None;
+ }
+
+ return std::make_pair(Start, Stride);
+}
+
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
// Find the first non-undef value in the shuffle mask.
unsigned i, e;
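BuildVectorSDNode::isConstantSequence, added above, recognizes a build_vector whose elements form an arithmetic progression Start + i * Stride and hands back the (Start, Stride) pair. The core of the algorithm is easy to see over plain integers; the sketch below is an equivalent for scalar int64_t values (it omits the element-size truncation the SelectionDAG code performs), not the LLVM code itself:

#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

// Return (Start, Stride) if Elts[i] == Start + i * Stride for all i and the
// stride is non-zero; otherwise nullopt.
std::optional<std::pair<int64_t, int64_t>>
constantSequence(const std::vector<int64_t> &Elts) {
  if (Elts.size() < 2)
    return std::nullopt;
  int64_t Start = Elts[0];
  int64_t Stride = Elts[1] - Start;
  if (Stride == 0)
    return std::nullopt;
  for (size_t I = 2; I < Elts.size(); ++I)
    if (Elts[I] != Start + Stride * static_cast<int64_t>(I))
      return std::nullopt;
  return std::make_pair(Start, Stride);
}

int main() {
  auto Seq = constantSequence({4, 7, 10, 13});
  if (Seq)
    std::cout << "start " << Seq->first << " stride " << Seq->second << "\n";
  std::cout << (constantSequence({1, 2, 4}) ? "sequence" : "not a sequence")
            << "\n";
  return 0;
}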
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 4a3ab00614b3..d1915fd4e7ae 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -198,7 +198,7 @@ public:
SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
: SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}
- virtual void addSuccessorWithProb(
+ void addSuccessorWithProb(
MachineBasicBlock *Src, MachineBasicBlock *Dst,
BranchProbability Prob = BranchProbability::getUnknown()) override {
SDB->addSuccessorWithProb(Src, Dst, Prob);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index cd4f0ae42bcd..6205e74837c0 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -654,6 +654,14 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
SelectionDAG &DAG, unsigned Depth) const {
+ EVT VT = Op.getValueType();
+
+ // Pretend we don't know anything about scalable vectors for now.
+ // TODO: We can probably do more work on simplifying the operations for
+ // scalable vectors, but for now we just bail out.
+ if (VT.isScalableVector())
+ return SDValue();
+
// Limit search depth.
if (Depth >= SelectionDAG::MaxRecursionDepth)
return SDValue();
@@ -664,7 +672,7 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
// Not demanding any bits/elts from Op.
if (DemandedBits == 0 || DemandedElts == 0)
- return DAG.getUNDEF(Op.getValueType());
+ return DAG.getUNDEF(VT);
bool IsLE = DAG.getDataLayout().isLittleEndian();
unsigned NumElts = DemandedElts.getBitWidth();
@@ -894,6 +902,13 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
unsigned Depth) const {
EVT VT = Op.getValueType();
+
+ // Pretend we don't know anything about scalable vectors for now.
+ // TODO: We can probably do more work on simplifying the operations for
+ // scalable vectors, but for now we just bail out.
+ if (VT.isScalableVector())
+ return SDValue();
+
APInt DemandedElts = VT.isVector()
? APInt::getAllOnes(VT.getVectorNumElements())
: APInt(1, 1);
diff --git a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
index 62b7f629f403..3e14edb5f730 100644
--- a/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
+++ b/contrib/llvm-project/llvm/lib/DWARFLinker/DWARFLinker.cpp
@@ -2343,7 +2343,7 @@ void DWARFLinker::addObjectFile(DWARFFile &File) {
updateAccelKind(*ObjectContexts.back().File.Dwarf);
}
-bool DWARFLinker::link() {
+Error DWARFLinker::link() {
assert(Options.NoOutput || TheDwarfEmitter);
// A unique ID that identifies each compile unit.
@@ -2410,6 +2410,55 @@ bool DWARFLinker::link() {
if (!OptContext.File.Dwarf)
continue;
+ // Check whether type units are presented.
+ if (!OptContext.File.Dwarf->types_section_units().empty()) {
+ reportWarning("type units are not currently supported: file will "
+ "be skipped",
+ OptContext.File);
+ OptContext.Skip = true;
+ continue;
+ }
+
+ // Check for unsupported sections. Following sections can be referenced
+ // from .debug_info section. Current DWARFLinker implementation does not
+ // support or update references to these tables. Thus we report warning
+ // and skip corresponding object file.
+ if (!OptContext.File.Dwarf->getDWARFObj()
+ .getRnglistsSection()
+ .Data.empty()) {
+ reportWarning("'.debug_rnglists' is not currently supported: file "
+ "will be skipped",
+ OptContext.File);
+ OptContext.Skip = true;
+ continue;
+ }
+
+ if (!OptContext.File.Dwarf->getDWARFObj()
+ .getLoclistsSection()
+ .Data.empty()) {
+ reportWarning("'.debug_loclists' is not currently supported: file "
+ "will be skipped",
+ OptContext.File);
+ OptContext.Skip = true;
+ continue;
+ }
+
+ if (!OptContext.File.Dwarf->getDWARFObj().getMacroSection().Data.empty()) {
+ reportWarning("'.debug_macro' is not currently supported: file "
+ "will be skipped",
+ OptContext.File);
+ OptContext.Skip = true;
+ continue;
+ }
+
+ if (OptContext.File.Dwarf->getDWARFObj().getMacinfoSection().size() > 1) {
+ reportWarning("'.debug_macinfo' is not currently supported: file "
+ "will be skipped",
+ OptContext.File);
+ OptContext.Skip = true;
+ continue;
+ }
+
// In a first phase, just read in the debug info and load all clang modules.
OptContext.CompileUnits.reserve(
OptContext.File.Dwarf->getNumCompileUnits());
@@ -2660,7 +2709,7 @@ bool DWARFLinker::link() {
"---------------\n\n";
}
- return true;
+ return Error::success();
}
bool DWARFLinker::verify(const DWARFFile &File) {
diff --git a/contrib/llvm-project/llvm/lib/DWP/DWP.cpp b/contrib/llvm-project/llvm/lib/DWP/DWP.cpp
index 44e39c019e0c..346f4dfd290d 100644
--- a/contrib/llvm-project/llvm/lib/DWP/DWP.cpp
+++ b/contrib/llvm-project/llvm/lib/DWP/DWP.cpp
@@ -18,6 +18,7 @@
#include "llvm/Object/Decompressor.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"
+#include <limits>
using namespace llvm;
using namespace llvm::object;
@@ -654,6 +655,12 @@ Error write(MCStreamer &Out, ArrayRef<std::string> Inputs) {
IndexVersion)];
C.Offset = InfoSectionOffset;
C.Length = Header.Length + 4;
+
+ if (std::numeric_limits<uint32_t>::max() - InfoSectionOffset <
+ C.Length)
+ return make_error<DWPError>(
+ "debug information section offset is greater than 4GB");
+
UnitOffset += C.Length;
if (Header.Version < 5 ||
Header.UnitType == dwarf::DW_UT_split_compile) {
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp
index dc07eaeaf615..3a6162db75c4 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp
@@ -18,13 +18,19 @@ static const char *CommonSectionName = "__common";
namespace llvm {
namespace jitlink {
+static Triple createTripleWithCOFFFormat(Triple T) {
+ T.setObjectFormat(Triple::COFF);
+ return T;
+}
+
COFFLinkGraphBuilder::COFFLinkGraphBuilder(
const object::COFFObjectFile &Obj, Triple TT,
LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
: Obj(Obj),
- G(std::make_unique<LinkGraph>(
- Obj.getFileName().str(), Triple(std::move(TT)), getPointerSize(Obj),
- getEndianness(Obj), std::move(GetEdgeKindName))) {
+ G(std::make_unique<LinkGraph>(Obj.getFileName().str(),
+ createTripleWithCOFFFormat(TT),
+ getPointerSize(Obj), getEndianness(Obj),
+ std::move(GetEdgeKindName))) {
LLVM_DEBUG({
dbgs() << "Created COFFLinkGraphBuilder for \"" << Obj.getFileName()
<< "\"\n";
@@ -128,16 +134,6 @@ Error COFFLinkGraphBuilder::graphifySections() {
if (Expected<StringRef> SecNameOrErr = Obj.getSectionName(*Sec))
SectionName = *SecNameOrErr;
- bool IsDiscardable =
- (*Sec)->Characteristics &
- (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
- if (IsDiscardable) {
- LLVM_DEBUG(dbgs() << " " << SecIndex << ": \"" << SectionName
- << "\" is discardable: "
- "No graph section will be created.\n");
- continue;
- }
-
// FIXME: Skip debug info sections
LLVM_DEBUG({
@@ -145,6 +141,8 @@ Error COFFLinkGraphBuilder::graphifySections() {
<< "Creating section for \"" << SectionName << "\"\n";
});
+ // FIXME: Revisit crash when dropping IMAGE_SCN_MEM_DISCARDABLE sections
+
// Get the section's memory protection flags.
MemProt Prot = MemProt::None;
if ((*Sec)->Characteristics & COFF::IMAGE_SCN_MEM_EXECUTE)
@@ -190,6 +188,7 @@ Error COFFLinkGraphBuilder::graphifySymbols() {
LLVM_DEBUG(dbgs() << " Creating graph symbols...\n");
SymbolSets.resize(Obj.getNumberOfSections() + 1);
+ PendingComdatExports.resize(Obj.getNumberOfSections() + 1);
GraphSymbols.resize(Obj.getNumberOfSymbols());
for (COFFSymbolIndex SymIndex = 0;
@@ -232,18 +231,16 @@ Error COFFLinkGraphBuilder::graphifySymbols() {
<< getCOFFSectionName(SectionIndex, Sec, *Sym)
<< " (index: " << SectionIndex << ") \n";
});
- GSym =
- &G->addExternalSymbol(SymbolName, Sym->getValue(), Linkage::Strong);
+ if (!ExternalSymbols.count(SymbolName))
+ ExternalSymbols[SymbolName] =
+ &G->addExternalSymbol(SymbolName, Sym->getValue(), Linkage::Strong);
+ GSym = ExternalSymbols[SymbolName];
} else if (Sym->isWeakExternal()) {
- COFFSymbolIndex TagIndex =
- Sym->getAux<object::coff_aux_weak_external>()->TagIndex;
- assert(Sym->getAux<object::coff_aux_weak_external>()->Characteristics !=
- COFF::IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY &&
- "IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY is not supported.");
- assert(Sym->getAux<object::coff_aux_weak_external>()->Characteristics !=
- COFF::IMAGE_WEAK_EXTERN_SEARCH_LIBRARY &&
- "IMAGE_WEAK_EXTERN_SEARCH_LIBRARY is not supported.");
- WeakAliasRequests.push_back({SymIndex, TagIndex, SymbolName});
+ auto *WeakExternal = Sym->getAux<object::coff_aux_weak_external>();
+ COFFSymbolIndex TagIndex = WeakExternal->TagIndex;
+ uint32_t Characteristics = WeakExternal->Characteristics;
+ WeakExternalRequests.push_back(
+ {SymIndex, TagIndex, Characteristics, SymbolName});
} else {
Expected<jitlink::Symbol *> NewGSym =
createDefinedSymbol(SymIndex, SymbolName, *Sym, Sec);
@@ -279,35 +276,41 @@ Error COFFLinkGraphBuilder::graphifySymbols() {
Error COFFLinkGraphBuilder::flushWeakAliasRequests() {
// Export the weak external symbols and alias it
- for (auto &WeakAlias : WeakAliasRequests) {
- if (auto *Target = getGraphSymbol(WeakAlias.Target)) {
+ for (auto &WeakExternal : WeakExternalRequests) {
+ if (auto *Target = getGraphSymbol(WeakExternal.Target)) {
Expected<object::COFFSymbolRef> AliasSymbol =
- Obj.getSymbol(WeakAlias.Alias);
+ Obj.getSymbol(WeakExternal.Alias);
if (!AliasSymbol)
return AliasSymbol.takeError();
+ // FIXME: IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY and
+ // IMAGE_WEAK_EXTERN_SEARCH_LIBRARY are handled in the same way.
+ Scope S =
+ WeakExternal.Characteristics == COFF::IMAGE_WEAK_EXTERN_SEARCH_ALIAS
+ ? Scope::Default
+ : Scope::Local;
+
// FIXME: Support this when there's a way to handle this.
if (!Target->isDefined())
return make_error<JITLinkError>("Weak external symbol with external "
"symbol as alternative not supported.");
jitlink::Symbol *NewSymbol = &G->addDefinedSymbol(
- Target->getBlock(), Target->getOffset(), WeakAlias.SymbolName,
- Target->getSize(), Linkage::Weak, Scope::Default,
- Target->isCallable(), false);
- setGraphSymbol(AliasSymbol->getSectionNumber(), WeakAlias.Alias,
+ Target->getBlock(), Target->getOffset(), WeakExternal.SymbolName,
+ Target->getSize(), Linkage::Weak, S, Target->isCallable(), false);
+ setGraphSymbol(AliasSymbol->getSectionNumber(), WeakExternal.Alias,
*NewSymbol);
LLVM_DEBUG({
- dbgs() << " " << WeakAlias.Alias
+ dbgs() << " " << WeakExternal.Alias
<< ": Creating weak external symbol for COFF symbol \""
- << WeakAlias.SymbolName << "\" in section "
+ << WeakExternal.SymbolName << "\" in section "
<< AliasSymbol->getSectionNumber() << "\n";
dbgs() << " " << *NewSymbol << "\n";
});
} else
return make_error<JITLinkError>("Weak symbol alias requested but actual "
"symbol not found for symbol " +
- formatv("{0:d}", WeakAlias.Alias));
+ formatv("{0:d}", WeakExternal.Alias));
}
return Error::success();
}
@@ -324,6 +327,8 @@ Error COFFLinkGraphBuilder::calculateImplicitSizeOfSymbols() {
SecIndex <= static_cast<COFFSectionIndex>(Obj.getNumberOfSections());
SecIndex++) {
auto &SymbolSet = SymbolSets[SecIndex];
+ if (SymbolSet.empty())
+ continue;
jitlink::Block *B = getGraphBlock(SecIndex);
orc::ExecutorAddrDiff LastOffset = B->getSize();
orc::ExecutorAddrDiff LastDifferentOffset = B->getSize();
@@ -394,25 +399,35 @@ Expected<Symbol *> COFFLinkGraphBuilder::createDefinedSymbol(
formatv("{0:d}", SymIndex));
Block *B = getGraphBlock(Symbol.getSectionNumber());
+ if (!B) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Skipping graph symbol since section was not created for "
+ "COFF symbol \""
+ << SymbolName << "\" in section " << Symbol.getSectionNumber()
+ << "\n";
+ });
+ return nullptr;
+ }
+
if (Symbol.isExternal()) {
// This is not a COMDAT sequence; export the symbol as-is
- if (!isComdatSection(Section))
+ if (!isComdatSection(Section)) {
+
return &G->addDefinedSymbol(
*B, Symbol.getValue(), SymbolName, 0, Linkage::Strong, Scope::Default,
Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
- else {
- if (!PendingComdatExport)
+ } else {
+ if (!PendingComdatExports[Symbol.getSectionNumber()])
return make_error<JITLinkError>("No pending COMDAT export for symbol " +
formatv("{0:d}", SymIndex));
- if (PendingComdatExport->SectionIndex != Symbol.getSectionNumber())
- return make_error<JITLinkError>(
- "COMDAT export section number mismatch for symbol " +
- formatv("{0:d}", SymIndex));
+
return exportCOMDATSymbol(SymIndex, SymbolName, Symbol);
}
}
- if (Symbol.getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC) {
+ if (Symbol.getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC ||
+ Symbol.getStorageClass() == COFF::IMAGE_SYM_CLASS_LABEL) {
const object::coff_aux_section_definition *Definition =
Symbol.getSectionDefinition();
if (!Definition || !isComdatSection(Section)) {
@@ -422,12 +437,14 @@ Expected<Symbol *> COFFLinkGraphBuilder::createDefinedSymbol(
Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
}
if (Definition->Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
- // FIXME: don't dead strip this when parent section is alive
- return &G->addDefinedSymbol(
+ auto Target = Definition->getNumber(Symbol.isBigObj());
+ auto GSym = &G->addDefinedSymbol(
*B, Symbol.getValue(), SymbolName, 0, Linkage::Strong, Scope::Local,
Symbol.getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION, false);
+ getGraphBlock(Target)->addEdge(Edge::KeepAlive, 0, *GSym, 0);
+ return GSym;
}
- if (PendingComdatExport)
+ if (PendingComdatExports[Symbol.getSectionNumber()])
return make_error<JITLinkError>(
"COMDAT export request already exists before symbol " +
formatv("{0:d}", SymIndex));
@@ -474,10 +491,16 @@ Expected<Symbol *> COFFLinkGraphBuilder::createCOMDATExportRequest(
break;
}
case COFF::IMAGE_COMDAT_SELECT_LARGEST: {
- // FIXME: Support IMAGE_COMDAT_SELECT_LARGEST when LinkGraph is able to
- // handle this.
- return make_error<JITLinkError>(
- "IMAGE_COMDAT_SELECT_LARGEST is not supported.");
+ // FIXME: Support IMAGE_COMDAT_SELECT_LARGEST properly when LinkGraph is
+ // able to handle this.
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Partially supported IMAGE_COMDAT_SELECT_LARGEST was used"
+ " in section "
+ << Symbol.getSectionNumber() << "\n";
+ });
+ L = Linkage::Weak;
+ break;
}
case COFF::IMAGE_COMDAT_SELECT_NEWEST: {
// Even link.exe doesn't support this selection properly.
@@ -489,7 +512,7 @@ Expected<Symbol *> COFFLinkGraphBuilder::createCOMDATExportRequest(
formatv("{0:d}", Definition->Selection));
}
}
- PendingComdatExport = {SymIndex, Symbol.getSectionNumber(), L};
+ PendingComdatExports[Symbol.getSectionNumber()] = {SymIndex, L};
return &G->addAnonymousSymbol(*B, Symbol.getValue(), Definition->Length,
false, false);
}
@@ -499,6 +522,7 @@ Expected<Symbol *>
COFFLinkGraphBuilder::exportCOMDATSymbol(COFFSymbolIndex SymIndex,
StringRef SymbolName,
object::COFFSymbolRef Symbol) {
+ auto &PendingComdatExport = PendingComdatExports[Symbol.getSectionNumber()];
COFFSymbolIndex TargetIndex = PendingComdatExport->SymbolIndex;
Linkage L = PendingComdatExport->Linkage;
jitlink::Symbol *Target = getGraphSymbol(TargetIndex);
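The change above replaces the single pending-COMDAT-export slot with one slot per section, so leader/export pairs from different sections can be in flight at the same time. A minimal sketch of that bookkeeping pattern, using hypothetical names and plain standard-library types rather than the JITLink API:

    #include <cstddef>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    // A COMDAT leader records a pending export for its section; the next
    // symbol in the same section consumes it. Indexing the slots by section
    // number lets several sections interleave their leader/export pairs.
    struct PendingExport {
      int LeaderSymbolIndex;
      bool WeakLinkage;
    };

    class ComdatTracker {
      std::vector<std::optional<PendingExport>> Pending; // one slot per section

    public:
      explicit ComdatTracker(std::size_t NumSections) : Pending(NumSections + 1) {}

      void recordLeader(int SectionNumber, int SymbolIndex, bool Weak) {
        if (Pending[SectionNumber])
          throw std::runtime_error("COMDAT export request already exists");
        Pending[SectionNumber] = PendingExport{SymbolIndex, Weak};
      }

      PendingExport consume(int SectionNumber) {
        if (!Pending[SectionNumber])
          throw std::runtime_error("no pending COMDAT export for this section");
        PendingExport P = *Pending[SectionNumber];
        Pending[SectionNumber].reset();
        return P;
      }
    };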
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h
index 4dc1b14dc4a2..f925f6d7aeef 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.h
@@ -111,19 +111,19 @@ private:
// COMDAT sequence.
struct ComdatExportRequest {
COFFSymbolIndex SymbolIndex;
- COFFSectionIndex SectionIndex;
jitlink::Linkage Linkage;
};
- Optional<ComdatExportRequest> PendingComdatExport;
+ std::vector<Optional<ComdatExportRequest>> PendingComdatExports;
// This represents a pending request to create a weak external symbol with a
// name.
- struct WeakAliasRequest {
+ struct WeakExternalRequest {
COFFSymbolIndex Alias;
COFFSymbolIndex Target;
+ uint32_t Characteristics;
StringRef SymbolName;
};
- std::vector<WeakAliasRequest> WeakAliasRequests;
+ std::vector<WeakExternalRequest> WeakExternalRequests;
// Per COFF section jitlink symbol set sorted by offset.
// Used for calculating implicit size of defined symbols.
@@ -162,6 +162,8 @@ private:
Section *CommonSection = nullptr;
std::vector<Block *> GraphBlocks;
std::vector<Symbol *> GraphSymbols;
+
+ DenseMap<StringRef, Symbol *> ExternalSymbols;
};
template <typename RelocHandlerFunction>
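The new ExternalSymbols map declared above is a straightforward get-or-create cache: every undefined reference to the same name should resolve to a single external symbol node instead of one node per referencing symbol. A minimal sketch of that pattern, with hypothetical names and standard containers in place of the JITLink types:

    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct ExternalSymbol {
      std::string Name;
    };

    class ExternalSymbolCache {
      std::unordered_map<std::string, ExternalSymbol *> ByName;
      std::vector<std::unique_ptr<ExternalSymbol>> Owned;

    public:
      ExternalSymbol &getOrCreate(const std::string &Name) {
        auto It = ByName.find(Name);
        if (It != ByName.end())
          return *It->second; // reuse the node created for an earlier reference
        Owned.push_back(std::make_unique<ExternalSymbol>(ExternalSymbol{Name}));
        ByName.emplace(Name, Owned.back().get());
        return *Owned.back();
      }
    };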
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp
index 3d36ad1ed767..e2040dc95acc 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/COFF_x86_64.cpp
@@ -12,8 +12,8 @@
#include "llvm/ExecutionEngine/JITLink/COFF_x86_64.h"
#include "COFFLinkGraphBuilder.h"
-#include "EHFrameSupportImpl.h"
#include "JITLinkGeneric.h"
+#include "SEHFrameSupport.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/ExecutionEngine/JITLink/x86_64.h"
#include "llvm/Object/COFF.h"
@@ -26,6 +26,11 @@ using namespace llvm::jitlink;
namespace {
+enum EdgeKind_coff_x86_64 : Edge::Kind {
+ PCRel32 = x86_64::FirstPlatformRelocation,
+ Pointer32NB,
+};
+
class COFFJITLinker_x86_64 : public JITLinker<COFFJITLinker_x86_64> {
friend class JITLinker<COFFJITLinker_x86_64>;
@@ -43,27 +48,7 @@ private:
class COFFLinkGraphBuilder_x86_64 : public COFFLinkGraphBuilder {
private:
- uint64_t ImageBase = 0;
- enum COFFX86RelocationKind {
- COFFAddr32NB,
- COFFRel32,
- };
-
- static Expected<COFFX86RelocationKind>
- getRelocationKind(const uint32_t Type) {
- switch (Type) {
- case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_ADDR32NB:
- return COFFAddr32NB;
- case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32:
- return COFFRel32;
- }
-
- return make_error<JITLinkError>("Unsupported x86_64 relocation:" +
- formatv("{0:d}", Type));
- }
-
Error addRelocations() override {
-
LLVM_DEBUG(dbgs() << "Processing relocations:\n");
for (const auto &RelSect : sections())
@@ -74,21 +59,9 @@ private:
return Error::success();
}
- uint64_t getImageBase() {
- if (!ImageBase) {
- ImageBase = std::numeric_limits<uint64_t>::max();
- for (const auto &Block : getGraph().blocks()) {
- if (Block->getAddress().getValue())
- ImageBase = std::min(ImageBase, Block->getAddress().getValue());
- }
- }
- return ImageBase;
- }
-
Error addSingleRelocation(const object::RelocationRef &Rel,
const object::SectionRef &FixupSect,
Block &BlockToFix) {
-
const object::coff_relocation *COFFRel = getObject().getCOFFRelocation(Rel);
auto SymbolIt = Rel.getSymbol();
if (SymbolIt == getObject().symbol_end()) {
@@ -110,62 +83,122 @@ private:
SymIndex, FixupSect.getIndex()),
inconvertibleErrorCode());
- Expected<COFFX86RelocationKind> RelocKind =
- getRelocationKind(Rel.getType());
- if (!RelocKind)
- return RelocKind.takeError();
-
int64_t Addend = 0;
orc::ExecutorAddr FixupAddress =
orc::ExecutorAddr(FixupSect.getAddress()) + Rel.getOffset();
Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
Edge::Kind Kind = Edge::Invalid;
+ const char *FixupPtr = BlockToFix.getContent().data() + Offset;
- switch (*RelocKind) {
- case COFFAddr32NB: {
- Kind = x86_64::Pointer32;
- Offset -= getImageBase();
+ switch (Rel.getType()) {
+ case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_ADDR32NB: {
+ Kind = EdgeKind_coff_x86_64::Pointer32NB;
+ Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
break;
}
- case COFFRel32: {
- Kind = x86_64::BranchPCRel32;
+ case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32: {
+ Kind = EdgeKind_coff_x86_64::PCRel32;
+ Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
break;
}
+ case COFF::RelocationTypeAMD64::IMAGE_REL_AMD64_REL32_1: {
+ Kind = EdgeKind_coff_x86_64::PCRel32;
+ Addend = *reinterpret_cast<const support::little32_t *>(FixupPtr);
+ Addend -= 1;
+ break;
+ }
+ default: {
+ return make_error<JITLinkError>("Unsupported x86_64 relocation:" +
+ formatv("{0:d}", Rel.getType()));
+ }
};
Edge GE(Kind, Offset, *GraphSymbol, Addend);
LLVM_DEBUG({
dbgs() << " ";
- printEdge(dbgs(), BlockToFix, GE, x86_64::getEdgeKindName(Kind));
+ printEdge(dbgs(), BlockToFix, GE, getCOFFX86RelocationKindName(Kind));
dbgs() << "\n";
});
BlockToFix.addEdge(std::move(GE));
+
return Error::success();
}
- /// Return the string name of the given COFF x86_64 edge kind.
- const char *getCOFFX86RelocationKindName(COFFX86RelocationKind R) {
- switch (R) {
- case COFFAddr32NB:
- return "COFFAddr32NB";
- case COFFRel32:
- return "COFFRel32";
+public:
+ COFFLinkGraphBuilder_x86_64(const object::COFFObjectFile &Obj, const Triple T)
+ : COFFLinkGraphBuilder(Obj, std::move(T), getCOFFX86RelocationKindName) {}
+};
+
+class COFFLinkGraphLowering_x86_64 {
+public:
+ // Lowers COFF x86_64 specific edges to generic x86_64 edges.
+ Error lowerCOFFRelocationEdges(LinkGraph &G, JITLinkContext &Ctx) {
+ for (auto *B : G.blocks()) {
+ for (auto &E : B->edges()) {
+ switch (E.getKind()) {
+ case EdgeKind_coff_x86_64::Pointer32NB: {
+ auto ImageBase = getImageBaseAddress(G, Ctx);
+ if (!ImageBase)
+ return ImageBase.takeError();
+ E.setAddend(E.getAddend() - *ImageBase);
+ E.setKind(x86_64::Pointer32);
+ break;
+ }
+ case EdgeKind_coff_x86_64::PCRel32: {
+ E.setKind(x86_64::PCRel32);
+ break;
+ }
+ default:
+ break;
+ }
+ }
}
+ return Error::success();
}
-public:
- COFFLinkGraphBuilder_x86_64(const object::COFFObjectFile &Obj, const Triple T)
- : COFFLinkGraphBuilder(Obj, std::move(T), x86_64::getEdgeKindName) {}
+private:
+ static StringRef getImageBaseSymbolName() { return "__ImageBase"; }
+ Expected<JITTargetAddress> getImageBaseAddress(LinkGraph &G,
+ JITLinkContext &Ctx) {
+ if (this->ImageBase)
+ return this->ImageBase;
+ for (auto *S : G.defined_symbols())
+ if (S->getName() == getImageBaseSymbolName()) {
+ this->ImageBase = S->getAddress().getValue();
+ return this->ImageBase;
+ }
+
+ JITLinkContext::LookupMap Symbols;
+ Symbols[getImageBaseSymbolName()] = SymbolLookupFlags::RequiredSymbol;
+ JITTargetAddress ImageBase;
+ Error Err = Error::success();
+ Ctx.lookup(Symbols,
+ createLookupContinuation([&](Expected<AsyncLookupResult> LR) {
+ ErrorAsOutParameter EAO(&Err);
+ if (!LR) {
+ Err = LR.takeError();
+ return;
+ }
+ auto &ImageBaseSymbol = LR->begin()->second;
+ ImageBase = ImageBaseSymbol.getAddress();
+ }));
+ if (Err)
+ return std::move(Err);
+ this->ImageBase = ImageBase;
+ return ImageBase;
+ }
+ JITTargetAddress ImageBase = 0;
};
-Error buildTables_COFF_x86_64(LinkGraph &G) {
- LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+Error lowerEdges_COFF_x86_64(LinkGraph &G, JITLinkContext *Ctx) {
+ LLVM_DEBUG(dbgs() << "Lowering COFF x86_64 edges:\n");
+ COFFLinkGraphLowering_x86_64 GraphLowering;
+
+ if (auto Err = GraphLowering.lowerCOFFRelocationEdges(G, *Ctx))
+ return Err;
- x86_64::GOTTableManager GOT;
- x86_64::PLTTableManager PLT(GOT);
- visitExistingEdges(G, GOT, PLT);
return Error::success();
}
} // namespace
@@ -173,6 +206,18 @@ Error buildTables_COFF_x86_64(LinkGraph &G) {
namespace llvm {
namespace jitlink {
+/// Return the string name of the given COFF x86_64 edge kind.
+const char *getCOFFX86RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case PCRel32:
+ return "PCRel32";
+ case Pointer32NB:
+ return "Pointer32NB";
+ default:
+ return x86_64::getEdgeKindName(R);
+ }
+}
+
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromCOFFObject_x86_64(MemoryBufferRef ObjectBuffer) {
LLVM_DEBUG({
@@ -194,16 +239,16 @@ void link_COFF_x86_64(std::unique_ptr<LinkGraph> G,
const Triple &TT = G->getTargetTriple();
if (Ctx->shouldAddDefaultTargetPasses(TT)) {
// Add a mark-live pass.
- if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ if (auto MarkLive = Ctx->getMarkLivePass(TT)) {
Config.PrePrunePasses.push_back(std::move(MarkLive));
- else
+ Config.PrePrunePasses.push_back(SEHFrameKeepAlivePass(".pdata"));
+ } else
Config.PrePrunePasses.push_back(markAllSymbolsLive);
- // Add an in-place GOT/Stubs/TLSInfoEntry build pass.
- Config.PostPrunePasses.push_back(buildTables_COFF_x86_64);
-
- // Add GOT/Stubs optimizer pass.
- Config.PreFixupPasses.push_back(x86_64::optimizeGOTAndStubAccesses);
+ // Add COFF edge lowering passes.
+ JITLinkContext *CtxPtr = Ctx.get();
+ Config.PreFixupPasses.push_back(
+ [CtxPtr](LinkGraph &G) { return lowerEdges_COFF_x86_64(G, CtxPtr); });
}
if (auto Err = Ctx->modifyPassConfig(*G, Config))
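The lowering pass above resolves the __ImageBase address at most once and reuses it for every Pointer32NB edge, so only the first edge pays for the (possibly asynchronous) lookup. A minimal sketch of that lazy-caching shape, with hypothetical names and the lookup abstracted into a callable:

    #include <cstdint>
    #include <functional>
    #include <optional>

    class ImageBaseCache {
      std::optional<std::uint64_t> Cached;

    public:
      // Resolve stands in for whatever mechanism the link context provides;
      // it runs only on the first call, after which the cached value is used.
      std::uint64_t get(const std::function<std::uint64_t()> &Resolve) {
        if (!Cached)
          Cached = Resolve();
        return *Cached;
      }
    };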
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h
new file mode 100644
index 000000000000..f7689e4e4043
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/SEHFrameSupport.h
@@ -0,0 +1,61 @@
+//===------- SEHFrameSupport.h - JITLink seh-frame utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// SEHFrame utils for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
+#define LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace jitlink {
+/// This pass adds keep-alive edges from SEH frame sections
+/// to the parent function content blocks.
+class SEHFrameKeepAlivePass {
+public:
+ SEHFrameKeepAlivePass(StringRef SEHFrameSectionName)
+ : SEHFrameSectionName(SEHFrameSectionName) {}
+
+ Error operator()(LinkGraph &G) {
+ auto *S = G.findSectionByName(SEHFrameSectionName);
+ if (!S)
+ return Error::success();
+
+ // Simply consider every block pointed to by an SEH frame block as a parent.
+ // This adds some unnecessary keep-alive edges to unwind info (xdata)
+ // blocks, but those blocks are usually dead by default, so they do not
+ // affect the fate of the SEH frame block.
+ for (auto *B : S->blocks()) {
+ auto &DummySymbol = G.addAnonymousSymbol(*B, 0, 0, false, false);
+ DenseSet<Block *> Children;
+ for (auto &E : B->edges()) {
+ auto &Sym = E.getTarget();
+ if (!Sym.isDefined())
+ continue;
+ Children.insert(&Sym.getBlock());
+ }
+ for (auto *Child : Children)
+ Child->addEdge(Edge(Edge::KeepAlive, 0, DummySymbol, 0));
+ }
+ return Error::success();
+ }
+
+private:
+ StringRef SEHFrameSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_SEHFRAMESUPPORT_H
\ No newline at end of file
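The pass above works by pointing each function block back at its .pdata record with a keep-alive edge, so pruning a live function cannot drop its unwind info. A minimal sketch of that graph manipulation on a toy block/edge model (the real pass anchors the edge on an anonymous symbol inside the .pdata block):

    #include <unordered_set>
    #include <vector>

    struct Block;

    struct Edge {
      Block *Target;
      bool KeepAlive;
    };

    struct Block {
      std::vector<Edge> Edges;
    };

    // Every block referenced from the .pdata block is treated as a parent and
    // receives a keep-alive edge back to the .pdata block.
    void addSEHKeepAlives(Block &PDataBlock) {
      std::unordered_set<Block *> Parents;
      for (const Edge &E : PDataBlock.Edges)
        Parents.insert(E.Target);
      for (Block *Parent : Parents)
        Parent->Edges.push_back({&PDataBlock, /*KeepAlive=*/true});
    }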
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
index df9979b47e88..393250a5578b 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/x86_64.cpp
@@ -36,6 +36,8 @@ const char *getEdgeKindName(Edge::Kind K) {
return "NegDelta32";
case Delta64FromGOT:
return "Delta64FromGOT";
+ case PCRel32:
+ return "PCRel32";
case BranchPCRel32:
return "BranchPCRel32";
case BranchPCRel32ToPtrJumpStub:
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
index 356b81b4f1c5..3de15db3f1c6 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
@@ -150,16 +150,39 @@ static Expected<MaterializationUnit::Interface>
getCOFFObjectFileSymbolInfo(ExecutionSession &ES,
const object::COFFObjectFile &Obj) {
MaterializationUnit::Interface I;
-
+ std::vector<Optional<object::coff_aux_section_definition>> ComdatDefs(
+ Obj.getNumberOfSections() + 1);
for (auto &Sym : Obj.symbols()) {
Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
if (!SymFlagsOrErr)
// TODO: Test this error.
return SymFlagsOrErr.takeError();
- // Skip symbols not defined in this object file.
- if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
- continue;
+ // Handle comdat symbols
+ auto COFFSym = Obj.getCOFFSymbol(Sym);
+ bool IsWeak = false;
+ if (auto *Def = COFFSym.getSectionDefinition()) {
+ auto Sec = Obj.getSection(COFFSym.getSectionNumber());
+ if (!Sec)
+ return Sec.takeError();
+ if (((*Sec)->Characteristics & COFF::IMAGE_SCN_LNK_COMDAT) &&
+ Def->Selection != COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) {
+ ComdatDefs[COFFSym.getSectionNumber()] = *Def;
+ continue;
+ }
+ }
+ if (!COFF::isReservedSectionNumber(COFFSym.getSectionNumber()) &&
+ ComdatDefs[COFFSym.getSectionNumber()]) {
+ auto Def = ComdatDefs[COFFSym.getSectionNumber()];
+ if (Def->Selection != COFF::IMAGE_COMDAT_SELECT_NODUPLICATES) {
+ IsWeak = true;
+ }
+ ComdatDefs[COFFSym.getSectionNumber()] = None;
+ } else {
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+ }
// Skip symbols that are not global.
if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
@@ -180,12 +203,13 @@ getCOFFObjectFileSymbolInfo(ExecutionSession &ES,
if (!SymFlags)
return SymFlags.takeError();
*SymFlags |= JITSymbolFlags::Exported;
- auto COFFSym = Obj.getCOFFSymbol(Sym);
// Weak external is always a function
- if (COFFSym.isWeakExternal()) {
+ if (COFFSym.isWeakExternal())
*SymFlags |= JITSymbolFlags::Callable;
- }
+
+ if (IsWeak)
+ *SymFlags |= JITSymbolFlags::Weak;
I.SymbolFlags[ES.intern(*Name)] = std::move(*SymFlags);
}
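The interface change above defers a COMDAT leader until the symbol that follows it in the same section, then marks that symbol weak for any selection other than no-duplicates, since another object file's copy may legitimately win. A minimal sketch of that classification, with an illustrative enum rather than the actual COFF selection constants:

    enum class ComdatSelection { NoDuplicates, Any, SameSize, ExactMatch, Largest };

    struct InterfaceFlags {
      bool Exported = false;
      bool Weak = false;
    };

    // The leader's section definition decides how the exported symbol that
    // follows it in the same COMDAT section is flagged.
    InterfaceFlags classifyComdatExport(ComdatSelection Sel) {
      InterfaceFlags Flags;
      Flags.Exported = true;
      Flags.Weak = (Sel != ComdatSelection::NoDuplicates);
      return Flags;
    }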
diff --git a/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp b/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
index bf13b6c325ec..5d4cfceefb3e 100644
--- a/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
+++ b/contrib/llvm-project/llvm/lib/FileCheck/FileCheck.cpp
@@ -1424,6 +1424,8 @@ void Pattern::printVariableDefs(const SourceMgr &SM,
// Sort variable captures by the order in which they matched the input.
// Ranges shouldn't be overlapping, so we can just compare the start.
llvm::sort(VarCaptures, [](const VarCapture &A, const VarCapture &B) {
+ if (&A == &B)
+ return false;
assert(A.Range.Start != B.Range.Start &&
"unexpected overlapping variable captures");
return A.Range.Start.getPointer() < B.Range.Start.getPointer();
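The early self-comparison check above matters because a strict weak ordering must report false for compare(x, x), and checked sort implementations may call the comparator with the same element on both sides, which would otherwise trip the overlap assertion. A minimal sketch of the same guard on a plain std::sort, with hypothetical field names:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Capture {
      const char *Start; // position in a single shared input buffer
    };

    void sortByStart(std::vector<Capture> &Caps) {
      std::sort(Caps.begin(), Caps.end(), [](const Capture &A, const Capture &B) {
        if (&A == &B)
          return false; // self-comparison is never "less than"
        assert(A.Start != B.Start && "unexpected overlapping captures");
        return A.Start < B.Start;
      });
    }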
diff --git a/contrib/llvm-project/llvm/lib/IR/Instructions.cpp b/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
index 26171f537244..f5039eb5126c 100644
--- a/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
@@ -1627,6 +1627,10 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
Align Alignment, AtomicOrdering Ordering,
SyncScope::ID SSID) {
+ assert(Ordering != AtomicOrdering::NotAtomic &&
+ "atomicrmw instructions can only be atomic.");
+ assert(Ordering != AtomicOrdering::Unordered &&
+ "atomicrmw instructions cannot be unordered.");
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
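The new assertions above encode the IR rule that an atomicrmw must carry a genuine atomic ordering, so NotAtomic and Unordered are rejected at construction time. A minimal sketch of the same check as a standalone predicate over an illustrative ordering enum:

    enum class AtomicOrdering {
      NotAtomic,
      Unordered,
      Monotonic,
      Acquire,
      Release,
      AcquireRelease,
      SequentiallyConsistent
    };

    // Orderings that are acceptable for a read-modify-write atomic operation.
    bool isValidRMWOrdering(AtomicOrdering O) {
      return O != AtomicOrdering::NotAtomic && O != AtomicOrdering::Unordered;
    }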
diff --git a/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
index c50d6901c9da..8ca75f58e403 100644
--- a/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
@@ -32,6 +32,39 @@
using namespace llvm;
+bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
+ switch (IID) {
+ case Intrinsic::objc_autorelease:
+ case Intrinsic::objc_autoreleasePoolPop:
+ case Intrinsic::objc_autoreleasePoolPush:
+ case Intrinsic::objc_autoreleaseReturnValue:
+ case Intrinsic::objc_copyWeak:
+ case Intrinsic::objc_destroyWeak:
+ case Intrinsic::objc_initWeak:
+ case Intrinsic::objc_loadWeak:
+ case Intrinsic::objc_loadWeakRetained:
+ case Intrinsic::objc_moveWeak:
+ case Intrinsic::objc_release:
+ case Intrinsic::objc_retain:
+ case Intrinsic::objc_retainAutorelease:
+ case Intrinsic::objc_retainAutoreleaseReturnValue:
+ case Intrinsic::objc_retainAutoreleasedReturnValue:
+ case Intrinsic::objc_retainBlock:
+ case Intrinsic::objc_storeStrong:
+ case Intrinsic::objc_storeWeak:
+ case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
+ case Intrinsic::objc_retainedObject:
+ case Intrinsic::objc_unretainedObject:
+ case Intrinsic::objc_unretainedPointer:
+ case Intrinsic::objc_retain_autorelease:
+ case Intrinsic::objc_sync_enter:
+ case Intrinsic::objc_sync_exit:
+ return true;
+ default:
+ return false;
+ }
+}
+
//===----------------------------------------------------------------------===//
/// DbgVariableIntrinsic - This is the common base class for debug info
/// intrinsics for variables.
diff --git a/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp b/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp
index 0ca40a675fe4..3e82987801c7 100644
--- a/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -105,11 +105,13 @@ uint64_t ModuleSummaryIndex::getFlags() const {
Flags |= 0x20;
if (withDSOLocalPropagation())
Flags |= 0x40;
+ if (withWholeProgramVisibility())
+ Flags |= 0x80;
return Flags;
}
void ModuleSummaryIndex::setFlags(uint64_t Flags) {
- assert(Flags <= 0x7f && "Unexpected bits in flag");
+ assert(Flags <= 0xff && "Unexpected bits in flag");
// 1 bit: WithGlobalValueDeadStripping flag.
// Set on combined index only.
if (Flags & 0x1)
@@ -139,6 +141,10 @@ void ModuleSummaryIndex::setFlags(uint64_t Flags) {
// Set on combined index only.
if (Flags & 0x40)
setWithDSOLocalPropagation();
+ // 1 bit: WithWholeProgramVisibility flag.
+ // Set on combined index only.
+ if (Flags & 0x80)
+ setWithWholeProgramVisibility();
}
// Collect for the given module the list of functions it defines
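The flag round-trip above gives each boolean summary property its own bit; adding WithWholeProgramVisibility as 0x80 is why the accepted mask in setFlags() widens from 0x7f to 0xff. A minimal sketch of the encode/decode pattern with just two illustrative bits:

    #include <cassert>
    #include <cstdint>

    struct SummaryFlags {
      bool DeadStripping = false;           // bit 0x1
      bool WholeProgramVisibility = false;  // bit 0x80 (the new flag)
    };

    std::uint64_t encodeFlags(const SummaryFlags &F) {
      std::uint64_t Bits = 0;
      if (F.DeadStripping)
        Bits |= 0x1;
      if (F.WholeProgramVisibility)
        Bits |= 0x80;
      return Bits;
    }

    SummaryFlags decodeFlags(std::uint64_t Bits) {
      assert(Bits <= 0xff && "Unexpected bits in flag");
      SummaryFlags F;
      F.DeadStripping = (Bits & 0x1) != 0;
      F.WholeProgramVisibility = (Bits & 0x80) != 0;
      return F;
    }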
diff --git a/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp b/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp
index 83b8c93e766f..fe2da5ca114f 100644
--- a/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp
@@ -29,6 +29,50 @@ static cl::opt<bool> PrintAfterAll("print-after-all",
llvm::cl::desc("Print IR after each pass"),
cl::init(false), cl::Hidden);
+// Print out the IR after passes, similar to -print-after-all except that it
+// only prints the IR after passes that change the IR. Those passes that do not
+// make changes to the IR are reported as not making any changes. In addition,
+// the initial IR is also reported. Other hidden options affect the output from
+// this option. -filter-passes will limit the output to the named passes that
+// actually change the IR and other passes are reported as filtered out. The
+// specified passes will either be reported as making no changes (with no IR
+// reported) or the changed IR will be reported. Also, the -filter-print-funcs
+// and -print-module-scope options will do similar filtering based on function
+// name, reporting changed IRs as functions (or modules if -print-module-scope is
+// specified) for a particular function or indicating that the IR has been
+// filtered out. The extra options can be combined, allowing only changed IRs
+// for certain passes on certain functions to be reported in different formats,
+// with the rest being reported as filtered out. The -print-before-changed
+// option will print the IR as it was before each pass that changed it. The
+// optional value of quiet will only report when the IR changes, suppressing all
+// other messages, including the initial IR. The values "diff" and "diff-quiet"
+// will present the changes in a form similar to a patch, in either verbose or
+// quiet mode, respectively. The lines that are removed and added are prefixed
+// with '-' and '+', respectively. The -filter-print-funcs and -filter-passes
+// can be used to filter the output. This reporter relies on the linux diff
+// utility to do comparisons and insert the prefixes. For systems that do not
+// have the necessary facilities, the error message will be shown in place of
+// the expected output.
+cl::opt<ChangePrinter> llvm::PrintChanged(
+ "print-changed", cl::desc("Print changed IRs"), cl::Hidden,
+ cl::ValueOptional, cl::init(ChangePrinter::None),
+ cl::values(
+ clEnumValN(ChangePrinter::Quiet, "quiet", "Run in quiet mode"),
+ clEnumValN(ChangePrinter::DiffVerbose, "diff",
+ "Display patch-like changes"),
+ clEnumValN(ChangePrinter::DiffQuiet, "diff-quiet",
+ "Display patch-like changes in quiet mode"),
+ clEnumValN(ChangePrinter::ColourDiffVerbose, "cdiff",
+ "Display patch-like changes with color"),
+ clEnumValN(ChangePrinter::ColourDiffQuiet, "cdiff-quiet",
+ "Display patch-like changes in quiet mode with color"),
+ clEnumValN(ChangePrinter::DotCfgVerbose, "dot-cfg",
+ "Create a website with graphical changes"),
+ clEnumValN(ChangePrinter::DotCfgQuiet, "dot-cfg-quiet",
+ "Create a website with graphical changes in quiet mode"),
+ // Sentinel value for unspecified option.
+ clEnumValN(ChangePrinter::Verbose, "", "")));
+
static cl::opt<bool>
PrintModuleScope("print-module-scope",
cl::desc("When printing IR for print-[before|after]{-all} "
diff --git a/contrib/llvm-project/llvm/lib/LTO/LTO.cpp b/contrib/llvm-project/llvm/lib/LTO/LTO.cpp
index a9e04ba760ca..cc7be24c1dbd 100644
--- a/contrib/llvm-project/llvm/lib/LTO/LTO.cpp
+++ b/contrib/llvm-project/llvm/lib/LTO/LTO.cpp
@@ -1103,6 +1103,8 @@ Error LTO::runRegularLTO(AddStreamFn AddStream) {
updateVCallVisibilityInModule(*RegularLTO.CombinedModule,
Conf.HasWholeProgramVisibility,
DynamicExportSymbols);
+ updatePublicTypeTestCalls(*RegularLTO.CombinedModule,
+ Conf.HasWholeProgramVisibility);
if (Conf.PreOptModuleHook &&
!Conf.PreOptModuleHook(0, *RegularLTO.CombinedModule))
@@ -1482,6 +1484,8 @@ Error LTO::runThinLTO(AddStreamFn AddStream, FileCache Cache,
std::set<GlobalValue::GUID> ExportedGUIDs;
+ if (hasWholeProgramVisibility(Conf.HasWholeProgramVisibility))
+ ThinLTO.CombinedIndex.setWithWholeProgramVisibility();
// If allowed, upgrade public vcall visibility to linkage unit visibility in
// the summaries before whole program devirtualization below.
updateVCallVisibilityInIndex(ThinLTO.CombinedIndex,
diff --git a/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp b/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
index e248e58e4e4e..2e32469b4926 100644
--- a/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
+++ b/contrib/llvm-project/llvm/lib/LTO/LTOBackend.cpp
@@ -40,6 +40,7 @@
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#include "llvm/Transforms/Utils/SplitModule.h"
@@ -560,6 +561,8 @@ Error lto::thinBackend(const Config &Conf, unsigned Task, AddStreamFn AddStream,
// the module, if applicable.
Mod.setPartialSampleProfileRatio(CombinedIndex);
+ updatePublicTypeTestCalls(Mod, CombinedIndex.withWholeProgramVisibility());
+
if (Conf.CodeGenOnly) {
codegen(Conf, TM.get(), AddStream, Task, Mod, CombinedIndex);
return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
diff --git a/contrib/llvm-project/llvm/lib/LTO/LTOCodeGenerator.cpp b/contrib/llvm-project/llvm/lib/LTO/LTOCodeGenerator.cpp
index 2abf249cbd62..2f7c485b9fc8 100644
--- a/contrib/llvm-project/llvm/lib/LTO/LTOCodeGenerator.cpp
+++ b/contrib/llvm-project/llvm/lib/LTO/LTOCodeGenerator.cpp
@@ -520,6 +520,8 @@ bool LTOCodeGenerator::optimize() {
// linker option in the old LTO API, but this call allows it to be specified
// via the internal option. Must be done before WPD invoked via the optimizer
// pipeline run below.
+ updatePublicTypeTestCalls(*MergedModule,
+ /* WholeProgramVisibilityEnabledInLTO */ false);
updateVCallVisibilityInModule(*MergedModule,
/* WholeProgramVisibilityEnabledInLTO */ false,
// FIXME: This needs linker information via a
@@ -539,6 +541,16 @@ bool LTOCodeGenerator::optimize() {
// Add an appropriate DataLayout instance for this module...
MergedModule->setDataLayout(TargetMach->createDataLayout());
+ if (!SaveIRBeforeOptPath.empty()) {
+ std::error_code EC;
+ raw_fd_ostream OS(SaveIRBeforeOptPath, EC, sys::fs::OF_None);
+ if (EC)
+ report_fatal_error(Twine("Failed to open ") + SaveIRBeforeOptPath +
+ " to save optimized bitcode\n");
+ WriteBitcodeToFile(*MergedModule, OS,
+ /* ShouldPreserveUseListOrder */ true);
+ }
+
ModuleSummaryIndex CombinedIndex(false);
TargetMach = createTargetMachine();
if (!opt(Config, TargetMach.get(), 0, *MergedModule, /*IsThinLTO=*/false,
diff --git a/contrib/llvm-project/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/contrib/llvm-project/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index a1041b3c85f5..2c723bef7d12 100644
--- a/contrib/llvm-project/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/contrib/llvm-project/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -452,6 +452,10 @@ ProcessThinLTOModule(Module &TheModule, ModuleSummaryIndex &Index,
bool DisableCodeGen, StringRef SaveTempsDir,
bool Freestanding, unsigned OptLevel, unsigned count,
bool DebugPassManager) {
+ // See comment at call to updateVCallVisibilityInIndex() for why
+ // WholeProgramVisibilityEnabledInLTO is false.
+ updatePublicTypeTestCalls(TheModule,
+ /* WholeProgramVisibilityEnabledInLTO */ false);
// "Benchmark"-like optimization: single-source case
bool SingleModule = (ModuleMap.size() == 1);
@@ -1047,6 +1051,8 @@ void ThinLTOCodeGenerator::run() {
// Currently there is no support for enabling whole program visibility via a
// linker option in the old LTO API, but this call allows it to be specified
// via the internal option. Must be done before WPD below.
+ if (hasWholeProgramVisibility(/* WholeProgramVisibilityEnabledInLTO */ false))
+ Index->setWithWholeProgramVisibility();
updateVCallVisibilityInIndex(*Index,
/* WholeProgramVisibilityEnabledInLTO */ false,
// FIXME: This needs linker information via a
diff --git a/contrib/llvm-project/llvm/lib/MC/ELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/MC/ELFObjectWriter.cpp
index 0b4e9866d50a..f6360c4e2f21 100644
--- a/contrib/llvm-project/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/ELFObjectWriter.cpp
@@ -293,9 +293,8 @@ public:
: ELFObjectWriter(std::move(MOTW)), OS(OS), DwoOS(DwoOS),
IsLittleEndian(IsLittleEndian) {}
- virtual bool checkRelocation(MCContext &Ctx, SMLoc Loc,
- const MCSectionELF *From,
- const MCSectionELF *To) override {
+ bool checkRelocation(MCContext &Ctx, SMLoc Loc, const MCSectionELF *From,
+ const MCSectionELF *To) override {
if (isDwoSection(*From)) {
Ctx.reportError(Loc, "A dwo section may not contain relocations");
return false;
diff --git a/contrib/llvm-project/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp b/contrib/llvm-project/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
index cf98cb8ff59f..3ee43398ff65 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCDisassembler/MCDisassembler.cpp
@@ -20,6 +20,11 @@ MCDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
return None;
}
+uint64_t MCDisassembler::suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const {
+ return 1;
+}
+
bool MCDisassembler::tryAddingSymbolicOperand(MCInst &Inst, int64_t Value,
uint64_t Address, bool IsBranch,
uint64_t Offset, uint64_t OpSize,
diff --git a/contrib/llvm-project/llvm/lib/MC/XCOFFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/MC/XCOFFObjectWriter.cpp
index d46ae2247535..8a43a477c1c7 100644
--- a/contrib/llvm-project/llvm/lib/MC/XCOFFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/XCOFFObjectWriter.cpp
@@ -253,7 +253,7 @@ class XCOFFObjectWriter : public MCObjectWriter {
CsectGroup &getCsectGroup(const MCSectionXCOFF *MCSec);
- virtual void reset() override;
+ void reset() override;
void executePostLayoutBinding(MCAssembler &, const MCAsmLayout &) override;
diff --git a/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.cpp b/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.cpp
index 8b44c09023f1..b127e1b43b8e 100644
--- a/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.cpp
@@ -434,41 +434,13 @@ Error SectionWriter::visit(const OwnedDataSection &Sec) {
return Error::success();
}
-static constexpr std::array<uint8_t, 4> ZlibGnuMagic = {{'Z', 'L', 'I', 'B'}};
-
-static bool isDataGnuCompressed(ArrayRef<uint8_t> Data) {
- return Data.size() > ZlibGnuMagic.size() &&
- std::equal(ZlibGnuMagic.begin(), ZlibGnuMagic.end(), Data.data());
-}
-
-template <class ELFT>
-static std::tuple<uint64_t, uint64_t>
-getDecompressedSizeAndAlignment(ArrayRef<uint8_t> Data) {
- const bool IsGnuDebug = isDataGnuCompressed(Data);
- const uint64_t DecompressedSize =
- IsGnuDebug
- ? support::endian::read64be(Data.data() + ZlibGnuMagic.size())
- : reinterpret_cast<const Elf_Chdr_Impl<ELFT> *>(Data.data())->ch_size;
- const uint64_t DecompressedAlign =
- IsGnuDebug ? 1
- : reinterpret_cast<const Elf_Chdr_Impl<ELFT> *>(Data.data())
- ->ch_addralign;
-
- return std::make_tuple(DecompressedSize, DecompressedAlign);
-}
-
template <class ELFT>
Error ELFSectionWriter<ELFT>::visit(const DecompressedSection &Sec) {
- const size_t DataOffset = isDataGnuCompressed(Sec.OriginalData)
- ? (ZlibGnuMagic.size() + sizeof(Sec.Size))
- : sizeof(Elf_Chdr_Impl<ELFT>);
-
- ArrayRef<uint8_t> CompressedContent(Sec.OriginalData.data() + DataOffset,
- Sec.OriginalData.size() - DataOffset);
+ ArrayRef<uint8_t> Compressed =
+ Sec.OriginalData.slice(sizeof(Elf_Chdr_Impl<ELFT>));
SmallVector<uint8_t, 128> DecompressedContent;
- if (Error Err =
- compression::zlib::uncompress(CompressedContent, DecompressedContent,
- static_cast<size_t>(Sec.Size)))
+ if (Error Err = compression::zlib::uncompress(Compressed, DecompressedContent,
+ static_cast<size_t>(Sec.Size)))
return createStringError(errc::invalid_argument,
"'" + Sec.Name + "': " + toString(std::move(Err)));
@@ -518,7 +490,7 @@ Error BinarySectionWriter::visit(const CompressedSection &Sec) {
template <class ELFT>
Error ELFSectionWriter<ELFT>::visit(const CompressedSection &Sec) {
uint8_t *Buf = reinterpret_cast<uint8_t *>(Out.getBufferStart()) + Sec.Offset;
- Elf_Chdr_Impl<ELFT> Chdr;
+ Elf_Chdr_Impl<ELFT> Chdr = {};
switch (Sec.CompressionType) {
case DebugCompressionType::None:
std::copy(Sec.OriginalData.begin(), Sec.OriginalData.end(), Buf);
@@ -1731,15 +1703,11 @@ Expected<SectionBase &> ELFBuilder<ELFT>::makeSection(const Elf_Shdr &Shdr) {
if (!Name)
return Name.takeError();
- if (Name->startswith(".zdebug") || (Shdr.sh_flags & ELF::SHF_COMPRESSED)) {
- uint64_t DecompressedSize, DecompressedAlign;
- std::tie(DecompressedSize, DecompressedAlign) =
- getDecompressedSizeAndAlignment<ELFT>(*Data);
- return Obj.addSection<CompressedSection>(
- CompressedSection(*Data, DecompressedSize, DecompressedAlign));
- }
-
- return Obj.addSection<Section>(*Data);
+ if (!(Shdr.sh_flags & ELF::SHF_COMPRESSED))
+ return Obj.addSection<Section>(*Data);
+ auto *Chdr = reinterpret_cast<const Elf_Chdr_Impl<ELFT> *>(Data->data());
+ return Obj.addSection<CompressedSection>(
+ CompressedSection(*Data, Chdr->ch_size, Chdr->ch_addralign));
}
}
}
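With the zlib-gnu (.zdebug) path removed above, the decompressed size and alignment are read directly from the ELF compression header that starts every SHF_COMPRESSED section. A minimal sketch of that header read for the 64-bit little-endian layout (the real code is templated over ELFT and handles endianness):

    #include <cstdint>
    #include <cstring>

    // Layout of Elf64_Chdr as specified by the ELF gABI.
    struct Elf64_Chdr {
      std::uint32_t ch_type;      // e.g. ELFCOMPRESS_ZLIB
      std::uint32_t ch_reserved;
      std::uint64_t ch_size;      // decompressed size
      std::uint64_t ch_addralign; // decompressed alignment
    };

    void readCompressionHeader(const unsigned char *SectionData,
                               std::uint64_t &DecompressedSize,
                               std::uint64_t &DecompressedAlign) {
      Elf64_Chdr Chdr;
      std::memcpy(&Chdr, SectionData, sizeof(Chdr)); // avoid unaligned reads
      DecompressedSize = Chdr.ch_size;
      DecompressedAlign = Chdr.ch_addralign;
    }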
diff --git a/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.h b/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.h
index 799db5034532..2c3ea3a5f6d6 100644
--- a/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.h
+++ b/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ELFObject.h
@@ -115,13 +115,13 @@ public:
Error visit(const OwnedDataSection &Sec) override;
Error visit(const StringTableSection &Sec) override;
Error visit(const DynamicRelocationSection &Sec) override;
- virtual Error visit(const SymbolTableSection &Sec) override = 0;
- virtual Error visit(const RelocationSection &Sec) override = 0;
- virtual Error visit(const GnuDebugLinkSection &Sec) override = 0;
- virtual Error visit(const GroupSection &Sec) override = 0;
- virtual Error visit(const SectionIndexSection &Sec) override = 0;
- virtual Error visit(const CompressedSection &Sec) override = 0;
- virtual Error visit(const DecompressedSection &Sec) override = 0;
+ Error visit(const SymbolTableSection &Sec) override = 0;
+ Error visit(const RelocationSection &Sec) override = 0;
+ Error visit(const GnuDebugLinkSection &Sec) override = 0;
+ Error visit(const GroupSection &Sec) override = 0;
+ Error visit(const SectionIndexSection &Sec) override = 0;
+ Error visit(const CompressedSection &Sec) override = 0;
+ Error visit(const DecompressedSection &Sec) override = 0;
explicit SectionWriter(WritableMemoryBuffer &Buf) : Out(Buf) {}
};
diff --git a/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp b/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
index b778006cf66e..9ad2c4135167 100644
--- a/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
+++ b/contrib/llvm-project/llvm/lib/ObjectYAML/ELFYAML.cpp
@@ -518,6 +518,14 @@ void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
BCaseMask(EF_AVR_ARCH_XMEGA7, EF_AVR_ARCH_MASK);
BCase(EF_AVR_LINKRELAX_PREPARED);
break;
+ case ELF::EM_LOONGARCH:
+ BCaseMask(EF_LOONGARCH_BASE_ABI_ILP32S, EF_LOONGARCH_BASE_ABI_MASK);
+ BCaseMask(EF_LOONGARCH_BASE_ABI_ILP32F, EF_LOONGARCH_BASE_ABI_MASK);
+ BCaseMask(EF_LOONGARCH_BASE_ABI_ILP32D, EF_LOONGARCH_BASE_ABI_MASK);
+ BCaseMask(EF_LOONGARCH_BASE_ABI_LP64S, EF_LOONGARCH_BASE_ABI_MASK);
+ BCaseMask(EF_LOONGARCH_BASE_ABI_LP64F, EF_LOONGARCH_BASE_ABI_MASK);
+ BCaseMask(EF_LOONGARCH_BASE_ABI_LP64D, EF_LOONGARCH_BASE_ABI_MASK);
+ break;
case ELF::EM_RISCV:
BCase(EF_RISCV_RVC);
BCaseMask(EF_RISCV_FLOAT_ABI_SOFT, EF_RISCV_FLOAT_ABI);
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
index 3b3eefcc29ca..945ef512391b 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -1249,6 +1249,9 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
// flattening of blocks.
OptimizePM.addPass(DivRemPairsPass());
+ // Try to annotate calls that were created during optimization.
+ OptimizePM.addPass(TailCallElimPass());
+
// LoopSink (and other loop passes since the last simplifyCFG) might have
// resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
OptimizePM.addPass(
diff --git a/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp b/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
index baea0eb53ef9..a0c63fb33369 100644
--- a/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/StandardInstrumentations.cpp
@@ -53,64 +53,6 @@ cl::opt<bool> PreservedCFGCheckerInstrumentation::VerifyPreservedCFG(
#endif
);
-// An option that prints out the IR after passes, similar to
-// -print-after-all except that it only prints the IR after passes that
-// change the IR. Those passes that do not make changes to the IR are
-// reported as not making any changes. In addition, the initial IR is
-// also reported. Other hidden options affect the output from this
-// option. -filter-passes will limit the output to the named passes
-// that actually change the IR and other passes are reported as filtered out.
-// The specified passes will either be reported as making no changes (with
-// no IR reported) or the changed IR will be reported. Also, the
-// -filter-print-funcs and -print-module-scope options will do similar
-// filtering based on function name, reporting changed IRs as functions(or
-// modules if -print-module-scope is specified) for a particular function
-// or indicating that the IR has been filtered out. The extra options
-// can be combined, allowing only changed IRs for certain passes on certain
-// functions to be reported in different formats, with the rest being
-// reported as filtered out. The -print-before-changed option will print
-// the IR as it was before each pass that changed it. The optional
-// value of quiet will only report when the IR changes, suppressing
-// all other messages, including the initial IR. The values "diff" and
-// "diff-quiet" will present the changes in a form similar to a patch, in
-// either verbose or quiet mode, respectively. The lines that are removed
-// and added are prefixed with '-' and '+', respectively. The
-// -filter-print-funcs and -filter-passes can be used to filter the output.
-// This reporter relies on the linux diff utility to do comparisons and
-// insert the prefixes. For systems that do not have the necessary
-// facilities, the error message will be shown in place of the expected output.
-//
-enum class ChangePrinter {
- None,
- Verbose,
- Quiet,
- DiffVerbose,
- DiffQuiet,
- ColourDiffVerbose,
- ColourDiffQuiet,
- DotCfgVerbose,
- DotCfgQuiet,
-};
-static cl::opt<ChangePrinter> PrintChanged(
- "print-changed", cl::desc("Print changed IRs"), cl::Hidden,
- cl::ValueOptional, cl::init(ChangePrinter::None),
- cl::values(
- clEnumValN(ChangePrinter::Quiet, "quiet", "Run in quiet mode"),
- clEnumValN(ChangePrinter::DiffVerbose, "diff",
- "Display patch-like changes"),
- clEnumValN(ChangePrinter::DiffQuiet, "diff-quiet",
- "Display patch-like changes in quiet mode"),
- clEnumValN(ChangePrinter::ColourDiffVerbose, "cdiff",
- "Display patch-like changes with color"),
- clEnumValN(ChangePrinter::ColourDiffQuiet, "cdiff-quiet",
- "Display patch-like changes in quiet mode with color"),
- clEnumValN(ChangePrinter::DotCfgVerbose, "dot-cfg",
- "Create a website with graphical changes"),
- clEnumValN(ChangePrinter::DotCfgQuiet, "dot-cfg-quiet",
- "Create a website with graphical changes in quiet mode"),
- // Sentinel value for unspecified option.
- clEnumValN(ChangePrinter::Verbose, "", "")));
-
// An option that supports the -print-changed option. See
// the description for -print-changed for an explanation of the use
// of this option. Note that this option has no effect without -print-changed.
diff --git a/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp b/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
index adb5d3f0964d..03c0c7aac423 100644
--- a/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/ARMAttributeParser.cpp
@@ -85,7 +85,7 @@ Error ARMAttributeParser::CPU_arch(AttrType tag) {
static const char *strings[] = {
"Pre-v4", "ARM v4", "ARM v4T", "ARM v5T", "ARM v5TE", "ARM v5TEJ", "ARM v6",
"ARM v6KZ", "ARM v6T2", "ARM v6K", "ARM v7", "ARM v6-M", "ARM v6S-M",
- "ARM v7E-M", "ARM v8", nullptr,
+ "ARM v7E-M", "ARM v8-A", "ARM v8-R",
"ARM v8-M Baseline", "ARM v8-M Mainline", nullptr, nullptr, nullptr,
"ARM v8.1-M Mainline", "ARM v9-A"
};
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e070ce2efa6b..72f0fc94940c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -255,6 +255,12 @@ static bool isZeroingInactiveLanes(SDValue Op) {
return false;
case Intrinsic::aarch64_sve_ptrue:
case Intrinsic::aarch64_sve_pnext:
+ case Intrinsic::aarch64_sve_cmpeq:
+ case Intrinsic::aarch64_sve_cmpne:
+ case Intrinsic::aarch64_sve_cmpge:
+ case Intrinsic::aarch64_sve_cmpgt:
+ case Intrinsic::aarch64_sve_cmphs:
+ case Intrinsic::aarch64_sve_cmphi:
case Intrinsic::aarch64_sve_cmpeq_wide:
case Intrinsic::aarch64_sve_cmpne_wide:
case Intrinsic::aarch64_sve_cmpge_wide:
@@ -265,6 +271,11 @@ static bool isZeroingInactiveLanes(SDValue Op) {
case Intrinsic::aarch64_sve_cmphi_wide:
case Intrinsic::aarch64_sve_cmplo_wide:
case Intrinsic::aarch64_sve_cmpls_wide:
+ case Intrinsic::aarch64_sve_fcmpeq:
+ case Intrinsic::aarch64_sve_fcmpne:
+ case Intrinsic::aarch64_sve_fcmpge:
+ case Intrinsic::aarch64_sve_fcmpgt:
+ case Intrinsic::aarch64_sve_fcmpuo:
return true;
}
}
@@ -879,6 +890,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
if (Subtarget->supportsAddressTopByteIgnored())
setTargetDAGCombine(ISD::LOAD);
+ setTargetDAGCombine(ISD::MSTORE);
+
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine({ISD::SELECT, ISD::VSELECT});
@@ -974,6 +987,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(Op, VT, Custom);
if (Subtarget->hasFullFP16()) {
+ setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
+
setOperationAction(ISD::SINT_TO_FP, MVT::v8i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Custom);
@@ -1619,6 +1634,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
setOperationAction(ISD::BITCAST, VT, Custom);
setOperationAction(ISD::BITREVERSE, VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::BSWAP, VT, Custom);
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
setOperationAction(ISD::CTLZ, VT, Custom);
@@ -11126,6 +11142,20 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
+ if (useSVEForFixedLengthVectorVT(VT)) {
+ if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
+ SDLoc DL(Op);
+ EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+ SDValue Start = DAG.getConstant(SeqInfo->first, DL, ContainerVT);
+ SDValue Steps = DAG.getStepVector(DL, ContainerVT, SeqInfo->second);
+ SDValue Seq = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);
+ return convertFromScalableVector(DAG, Op.getValueType(), Seq);
+ }
+
+ // Revert to common legalisation for all other variants.
+ return SDValue();
+ }
+
// Try to build a simple constant vector.
Op = NormalizeBuildVector(Op, DAG);
if (VT.isInteger()) {
@@ -12772,6 +12802,12 @@ bool AArch64TargetLowering::shouldSinkOperands(
if (isSplatShuffle(II->getOperand(1)))
Ops.push_back(&II->getOperandUse(1));
return !Ops.empty();
+ case Intrinsic::aarch64_sve_ptest_first:
+ case Intrinsic::aarch64_sve_ptest_last:
+ if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0)))
+ if (IIOp->getIntrinsicID() == Intrinsic::aarch64_sve_ptrue)
+ Ops.push_back(&II->getOperandUse(0));
+ return !Ops.empty();
case Intrinsic::aarch64_sme_write_horiz:
case Intrinsic::aarch64_sme_write_vert:
case Intrinsic::aarch64_sme_writeq_horiz:
@@ -17142,7 +17178,8 @@ static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
assert((N->getOpcode() == AArch64ISD::UUNPKHI ||
N->getOpcode() == AArch64ISD::UUNPKLO) &&
"Unexpected Opcode!");
@@ -17151,6 +17188,42 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG) {
if (N->getOperand(0).isUndef())
return DAG.getUNDEF(N->getValueType(0));
+ // If this is a masked load followed by an UUNPKLO, fold this into a masked
+ // extending load. We can do this even if this is already a masked
+ // {z,}extload.
+ if (N->getOperand(0).getOpcode() == ISD::MLOAD &&
+ N->getOpcode() == AArch64ISD::UUNPKLO) {
+ MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0));
+ SDValue Mask = MLD->getMask();
+ SDLoc DL(N);
+
+ if (MLD->isUnindexed() && MLD->getExtensionType() != ISD::SEXTLOAD &&
+ SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE &&
+ (MLD->getPassThru()->isUndef() ||
+ isZerosVector(MLD->getPassThru().getNode()))) {
+ unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
+ unsigned PgPattern = Mask->getConstantOperandVal(0);
+ EVT VT = N->getValueType(0);
+
+ // Ensure we can double the size of the predicate pattern
+ unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
+ if (NumElts &&
+ NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) {
+ Mask =
+ getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern);
+ SDValue PassThru = DAG.getConstant(0, DL, VT);
+ SDValue NewLoad = DAG.getMaskedLoad(
+ VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask,
+ PassThru, MLD->getMemoryVT(), MLD->getMemOperand(),
+ MLD->getAddressingMode(), ISD::ZEXTLOAD);
+
+ DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), NewLoad.getValue(1));
+
+ return NewLoad;
+ }
+ }
+ }
+
return SDValue();
}
@@ -17484,6 +17557,50 @@ static SDValue performSTORECombine(SDNode *N,
return SDValue();
}
+static SDValue performMSTORECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
+ SDValue Value = MST->getValue();
+ SDValue Mask = MST->getMask();
+ SDLoc DL(N);
+
+ // If this is a UZP1 followed by a masked store, fold this into a masked
+ // truncating store. We can do this even if this is already a masked
+ // truncstore.
+ if (Value.getOpcode() == AArch64ISD::UZP1 && Value->hasOneUse() &&
+ MST->isUnindexed() && Mask->getOpcode() == AArch64ISD::PTRUE &&
+ Value.getValueType().isInteger()) {
+ Value = Value.getOperand(0);
+ if (Value.getOpcode() == ISD::BITCAST) {
+ EVT HalfVT =
+ Value.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
+ EVT InVT = Value.getOperand(0).getValueType();
+
+ if (HalfVT.widenIntegerVectorElementType(*DAG.getContext()) == InVT) {
+ unsigned MinSVESize = Subtarget->getMinSVEVectorSizeInBits();
+ unsigned PgPattern = Mask->getConstantOperandVal(0);
+
+ // Ensure we can double the size of the predicate pattern
+ unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
+ if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <=
+ MinSVESize) {
+ Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1),
+ PgPattern);
+ return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0),
+ MST->getBasePtr(), MST->getOffset(), Mask,
+ MST->getMemoryVT(), MST->getMemOperand(),
+ MST->getAddressingMode(),
+ /*IsTruncating=*/true);
+ }
+ }
+ }
+ }
+
+ return SDValue();
+}
+
/// \return true if part of the index was folded into the Base.
static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
SDLoc DL, SelectionDAG &DAG) {
@@ -18191,7 +18308,9 @@ static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
Op0ExtV, Op1ExtV, Op->getOperand(2));
}
-static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::SETCC && "Unexpected opcode!");
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -18234,6 +18353,21 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
}
}
+ // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne)
+ // ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne)
+ if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE) && isNullConstant(RHS) &&
+ LHS->getOpcode() == ISD::BITCAST) {
+ EVT ToVT = LHS->getValueType(0);
+ EVT FromVT = LHS->getOperand(0).getValueType();
+ if (FromVT.isFixedLengthVector() &&
+ FromVT.getVectorElementType() == MVT::i1) {
+ LHS = DAG.getNode(ISD::VECREDUCE_OR, DL, MVT::i1, LHS->getOperand(0));
+ LHS = DAG.getNode(ISD::ZERO_EXTEND, DL, ToVT, LHS);
+ return DAG.getSetCC(DL, VT, LHS, RHS, Cond);
+ }
+ }
+
return SDValue();
}
@@ -19376,13 +19510,15 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::VSELECT:
return performVSelectCombine(N, DCI.DAG);
case ISD::SETCC:
- return performSETCCCombine(N, DAG);
+ return performSETCCCombine(N, DCI, DAG);
case ISD::LOAD:
if (performTBISimplification(N->getOperand(1), DCI, DAG))
return SDValue(N, 0);
break;
case ISD::STORE:
return performSTORECombine(N, DCI, DAG, Subtarget);
+ case ISD::MSTORE:
+ return performMSTORECombine(N, DCI, DAG, Subtarget);
case ISD::MGATHER:
case ISD::MSCATTER:
return performMaskedGatherScatterCombine(N, DCI, DAG);
@@ -19407,7 +19543,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performSpliceCombine(N, DAG);
case AArch64ISD::UUNPKLO:
case AArch64ISD::UUNPKHI:
- return performUnpackCombine(N, DAG);
+ return performUnpackCombine(N, DAG, Subtarget);
case AArch64ISD::UZP1:
return performUzpCombine(N, DAG);
case AArch64ISD::SETCC_MERGE_ZERO:
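Among the AArch64 combines added above, the SETCC change rewrites a compare of the integer bitcast of an i1 vector against zero into a vector OR-reduction, which lowers more directly onto predicate reductions. A minimal sketch of the scalar equivalence the rewrite relies on, over a fixed 8-lane mask:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Comparing the bitcast of a v8i1 mask against zero ...
    bool anyLaneSetViaBitcast(const std::array<bool, 8> &Mask) {
      std::uint8_t Bits = 0;
      for (std::size_t I = 0; I < Mask.size(); ++I)
        Bits |= static_cast<std::uint8_t>(Mask[I]) << I;
      return Bits != 0;
    }

    // ... is equivalent to OR-reducing its lanes (vecreduce_or).
    bool anyLaneSetViaReduce(const std::array<bool, 8> &Mask) {
      bool Any = false;
      for (bool Lane : Mask)
        Any = Any || Lane;
      return Any;
    }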
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index a7b7e5270888..926e7305bab9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4052,6 +4052,12 @@ def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
+// Pattern for FP16 immediates
+let Predicates = [HasFullFP16] in {
+ def : Pat<(f16 fpimm:$in),
+ (FMOVWHr (MOVi32imm (bitcast_fpimm_to_i32 f16:$in)))>;
+}
+
//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 4032c4667bc7..9b040860cc3c 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -287,6 +287,8 @@ def AArch64fadda_p_node : SDNode<"AArch64ISD::FADDA_PRED", SDT_AArch64ReduceWith
def AArch64fadda_p : PatFrags<(ops node:$op1, node:$op2, node:$op3),
[(AArch64fadda_p_node node:$op1, node:$op2, node:$op3),
(AArch64fadda_p_node (SVEAllActive), node:$op2,
+ (vselect node:$op1, node:$op3, (splat_vector (f16 fpimm_minus0)))),
+ (AArch64fadda_p_node (SVEAllActive), node:$op2,
(vselect node:$op1, node:$op3, (splat_vector (f32 fpimm_minus0)))),
(AArch64fadda_p_node (SVEAllActive), node:$op2,
(vselect node:$op1, node:$op3, (splat_vector (f64 fpimm_minus0))))]>;
@@ -337,6 +339,22 @@ def AArch64bic : PatFrags<(ops node:$op1, node:$op2),
def AArch64subr : PatFrag<(ops node:$op1, node:$op2),
(sub node:$op2, node:$op1)>;
+def AArch64add_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2),
+ [(int_aarch64_sve_add node:$pred, node:$op1, node:$op2),
+ (add node:$op1, (vselect node:$pred, node:$op2, (SVEDup0)))]>;
+def AArch64sub_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2),
+ [(int_aarch64_sve_sub node:$pred, node:$op1, node:$op2),
+ (sub node:$op1, (vselect node:$pred, node:$op2, (SVEDup0)))]>;
+def AArch64mla_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+ [(int_aarch64_sve_mla node:$pred, node:$op1, node:$op2, node:$op3),
+ (add node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
+ // add(a, select(mask, mul(b, c), splat(0))) -> mla(a, mask, b, c)
+ (add node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
+def AArch64mls_m1 : PatFrags<(ops node:$pred, node:$op1, node:$op2, node:$op3),
+ [(int_aarch64_sve_mls node:$pred, node:$op1, node:$op2, node:$op3),
+ (sub node:$op1, (AArch64mul_p_oneuse node:$pred, node:$op2, node:$op3)),
+ // sub(a, select(mask, mul(b, c), splat(0))) -> mls(a, mask, b, c)
+ (sub node:$op1, (vselect node:$pred, (AArch64mul_p_oneuse (SVEAllActive), node:$op2, node:$op3), (SVEDup0)))]>;
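
These PatFrags let plain ISD nodes match the merging predicated instructions:
adding (or subtracting) a value that has been vselect-ed against a zero splat
only changes the active lanes, which is exactly what ADD_ZPmZ/SUB_ZPmZ and
MLA/MLS do. A per-lane scalar model of the add case (a sketch; the lane values
are arbitrary):

    #include <array>
    #include <cassert>
    #include <cstddef>

    int main() {
      std::array<int, 4> Acc{1, 2, 3, 4}, X{10, 20, 30, 40};
      std::array<bool, 4> Mask{true, false, true, false};
      for (size_t I = 0; I < Acc.size(); ++I) {
        int ViaSelect = Acc[I] + (Mask[I] ? X[I] : 0);     // add + vselect + dup(0)
        int Predicated = Mask[I] ? Acc[I] + X[I] : Acc[I]; // merging predicated add
        assert(ViaSelect == Predicated);
      }
    }
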
let Predicates = [HasSVE] in {
defm RDFFR_PPz : sve_int_rdffr_pred<0b0, "rdffr", int_aarch64_sve_rdffr_z>;
@@ -359,8 +377,8 @@ let Predicates = [HasSVEorSME] in {
defm EOR_ZZZ : sve_int_bin_cons_log<0b10, "eor", xor>;
defm BIC_ZZZ : sve_int_bin_cons_log<0b11, "bic", AArch64bic>;
- defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", "ADD_ZPZZ", int_aarch64_sve_add, DestructiveBinaryComm>;
- defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", int_aarch64_sve_sub, DestructiveBinaryCommWithRev, "SUBR_ZPmZ">;
+ defm ADD_ZPmZ : sve_int_bin_pred_arit_0<0b000, "add", "ADD_ZPZZ", AArch64add_m1, DestructiveBinaryComm>;
+ defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", AArch64sub_m1, DestructiveBinaryCommWithRev, "SUBR_ZPmZ">;
defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", "SUBR_ZPZZ", int_aarch64_sve_subr, DestructiveBinaryCommWithRev, "SUB_ZPmZ", /*isReverseInstr*/ 1>;
defm ORR_ZPmZ : sve_int_bin_pred_log<0b000, "orr", "ORR_ZPZZ", int_aarch64_sve_orr, DestructiveBinaryComm>;
@@ -391,8 +409,8 @@ let Predicates = [HasSVEorSME] in {
defm MAD_ZPmZZ : sve_int_mladdsub_vvv_pred<0b0, "mad", int_aarch64_sve_mad>;
defm MSB_ZPmZZ : sve_int_mladdsub_vvv_pred<0b1, "msb", int_aarch64_sve_msb>;
- defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", int_aarch64_sve_mla, add, AArch64mul_p_oneuse>;
- defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", int_aarch64_sve_mls, sub, AArch64mul_p_oneuse>;
+ defm MLA_ZPmZZ : sve_int_mlas_vvv_pred<0b0, "mla", AArch64mla_m1>;
+ defm MLS_ZPmZZ : sve_int_mlas_vvv_pred<0b1, "mls", AArch64mls_m1>;
// SVE predicated integer reductions.
defm SADDV_VPZ : sve_int_reduce_0_saddv<0b000, "saddv", AArch64saddv_p>;
@@ -712,6 +730,12 @@ let Predicates = [HasSVEorSME] in {
(DUP_ZI_D $a, $b)>;
// Duplicate immediate FP into all vector elements.
+ def : Pat<(nxv2f16 (splat_vector (f16 fpimm:$val))),
+ (DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
+ def : Pat<(nxv4f16 (splat_vector (f16 fpimm:$val))),
+ (DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
+ def : Pat<(nxv8f16 (splat_vector (f16 fpimm:$val))),
+ (DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
def : Pat<(nxv2f32 (splat_vector (f32 fpimm:$val))),
(DUP_ZR_S (MOVi32imm (bitcast_fpimm_to_i32 f32:$val)))>;
def : Pat<(nxv4f32 (splat_vector (f32 fpimm:$val))),
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index 1b65589416c3..2f20232e452d 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -350,6 +350,14 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
return MCDisassembler::Fail;
}
+uint64_t AArch64Disassembler::suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const {
+ // AArch64 instructions are always 4 bytes wide, so there's no point
+ // in skipping any smaller number of bytes if an instruction can't
+ // be decoded.
+ return 4;
+}
+
static MCSymbolizer *
createAArch64ExternalSymbolizer(const Triple &TT, LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp,
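
suggestBytesToSkip lets a disassembly driver resynchronize after a failed
decode; since every AArch64 instruction is 4 bytes wide, skipping 4 keeps the
stream aligned. A simplified model of such a driver loop (the Decode and Print
callbacks are placeholders, not the real MCDisassembler API):

    #include <cstdint>
    #include <vector>

    struct Decoded {
      bool Ok;
      uint64_t Size;
    };

    template <typename DecodeOne, typename Report>
    void disassembleAll(const std::vector<uint8_t> &Bytes, DecodeOne Decode,
                        Report Print) {
      uint64_t Addr = 0;
      while (Addr < Bytes.size()) {
        Decoded D = Decode(Bytes, Addr);
        if (!D.Ok) {
          Print(Addr, "<unknown>");
          Addr += 4; // what suggestBytesToSkip() returns for AArch64
          continue;
        }
        Print(Addr, "<insn>");
        Addr += D.Size;
      }
    }

    int main() {
      std::vector<uint8_t> Bytes(12, 0);
      disassembleAll(
          Bytes,
          [](const std::vector<uint8_t> &, uint64_t) {
            return Decoded{false, 0}; // pretend nothing decodes
          },
          [](uint64_t, const char *) {});
    }
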
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
index 6761d449a7f4..b9f78546b89b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.h
@@ -30,6 +30,9 @@ public:
MCDisassembler::DecodeStatus
getInstruction(MCInst &Instr, uint64_t &Size, ArrayRef<uint8_t> Bytes,
uint64_t Address, raw_ostream &CStream) const override;
+
+ uint64_t suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
index 04bc91318da8..d655caa80ba8 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
@@ -66,8 +66,8 @@ public:
report_fatal_error("Invalid rule identifier");
}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool AArch64O0PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index ba206bac68d1..dfb531cda7e9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -355,8 +355,8 @@ public:
report_fatal_error("Invalid rule identifier");
}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool AArch64PostLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index d7959a82c484..eab1de94e9c8 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -997,8 +997,8 @@ public:
report_fatal_error("Invalid rule identifier");
}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool AArch64PostLegalizerLoweringInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 275949c5ee64..50bae68b4d33 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -370,8 +370,8 @@ public:
report_fatal_error("Invalid rule identifier");
}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool AArch64PreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 7cdd4c4af95e..36daecf634d7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2958,8 +2958,7 @@ class sve_int_mlas_vvv_pred<bits<2> sz8_64, bits<1> opc, string asm,
let ElementSize = zprty.ElementSize;
}
-multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op,
- SDPatternOperator outerop, SDPatternOperator mulop> {
+multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op> {
def _B : sve_int_mlas_vvv_pred<0b00, opc, asm, ZPR8>;
def _H : sve_int_mlas_vvv_pred<0b01, opc, asm, ZPR16>;
def _S : sve_int_mlas_vvv_pred<0b10, opc, asm, ZPR32>;
@@ -2969,15 +2968,6 @@ multiclass sve_int_mlas_vvv_pred<bits<1> opc, string asm, SDPatternOperator op,
def : SVE_4_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
def : SVE_4_Op_Pat<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
def : SVE_4_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
-
- def : Pat<(outerop nxv16i8:$Op1, (mulop nxv16i1:$pred, nxv16i8:$Op2, nxv16i8:$Op3)),
- (!cast<Instruction>(NAME # _B) $pred, $Op1, $Op2, $Op3)>;
- def : Pat<(outerop nxv8i16:$Op1, (mulop nxv8i1:$pred, nxv8i16:$Op2, nxv8i16:$Op3)),
- (!cast<Instruction>(NAME # _H) $pred, $Op1, $Op2, $Op3)>;
- def : Pat<(outerop nxv4i32:$Op1, (mulop nxv4i1:$pred, nxv4i32:$Op2, nxv4i32:$Op3)),
- (!cast<Instruction>(NAME # _S) $pred, $Op1, $Op2, $Op3)>;
- def : Pat<(outerop nxv2i64:$Op1, (mulop nxv2i1:$pred, nxv2i64:$Op2, nxv2i64:$Op3)),
- (!cast<Instruction>(NAME # _D) $pred, $Op1, $Op2, $Op3)>;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index b4a8766d682e..56a9a30bc59a 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -29,6 +29,8 @@
#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPUMemoryUtils.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Constants.h"
@@ -43,6 +45,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <tuple>
#include <vector>
#define DEBUG_TYPE "amdgpu-lower-module-lds"
@@ -97,6 +100,9 @@ class AMDGPULowerModuleLDS : public ModulePass {
static void
removeFromUsedLists(Module &M,
const std::vector<GlobalVariable *> &LocalVars) {
+ // The verifier rejects used lists containing an inttoptr of a constant,
+ // so remove the variables from these lists before replaceAllUsesWith.

+
SmallPtrSet<Constant *, 32> LocalVarsSet;
for (GlobalVariable *LocalVar : LocalVars)
if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
@@ -146,12 +152,59 @@ public:
}
bool runOnModule(Module &M) override {
+ LLVMContext &Ctx = M.getContext();
CallGraph CG = CallGraph(M);
bool Changed = superAlignLDSGlobals(M);
+
+ // Move variables used by functions into amdgcn.module.lds
std::vector<GlobalVariable *> ModuleScopeVariables =
AMDGPU::findVariablesToLower(M, nullptr);
- Changed |= processUsedLDS(CG, M, ModuleScopeVariables);
+ if (!ModuleScopeVariables.empty()) {
+ std::string VarName = "llvm.amdgcn.module.lds";
+
+ GlobalVariable *SGV;
+ DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
+ std::tie(SGV, LDSVarsToConstantGEP) =
+ createLDSVariableReplacement(M, VarName, ModuleScopeVariables);
+
+ appendToCompilerUsed(
+ M, {static_cast<GlobalValue *>(
+ ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
+
+ removeFromUsedLists(M, ModuleScopeVariables);
+ replaceLDSVariablesWithStruct(M, ModuleScopeVariables, SGV,
+ LDSVarsToConstantGEP,
+ [](Use &) { return true; });
+
+ // This ensures the variable is allocated when called functions access it.
+ // It also lets other passes, specifically PromoteAlloca, accurately
+ // calculate how much LDS will be used by the kernel after lowering.
+ IRBuilder<> Builder(Ctx);
+ for (Function &Func : M.functions()) {
+ if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
+ const CallGraphNode *N = CG[&Func];
+ const bool CalleesRequireModuleLDS = N->size() > 0;
+
+ if (CalleesRequireModuleLDS) {
+ // If a function this kernel might call requires module LDS,
+ // annotate the kernel to let later passes know it will allocate
+ // this structure, even if not apparent from the IR.
+ markUsedByKernel(Builder, &Func, SGV);
+ } else {
+ // However if we are certain this kernel cannot call a function that
+ // requires module LDS, annotate the kernel so the backend can elide
+ // the allocation without repeating callgraph walks.
+ Func.addFnAttr("amdgpu-elide-module-lds");
+ }
+ }
+ }
+
+ Changed = true;
+ }
+
+ // Move variables used by kernels into per-kernel instances
for (Function &F : M.functions()) {
if (F.isDeclaration())
continue;
@@ -159,9 +212,37 @@ public:
// Only lower compute kernels' LDS.
if (!AMDGPU::isKernel(F.getCallingConv()))
continue;
+
std::vector<GlobalVariable *> KernelUsedVariables =
AMDGPU::findVariablesToLower(M, &F);
- Changed |= processUsedLDS(CG, M, KernelUsedVariables, &F);
+
+ // Replace all constant uses with instructions if they belong to the
+ // current kernel. Not strictly necessary, but removing it causes test churn.
+ for (size_t I = 0; I < KernelUsedVariables.size(); I++) {
+ GlobalVariable *GV = KernelUsedVariables[I];
+ for (User *U : make_early_inc_range(GV->users())) {
+ if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
+ AMDGPU::replaceConstantUsesInFunction(C, &F);
+ }
+ GV->removeDeadConstantUsers();
+ }
+
+ if (!KernelUsedVariables.empty()) {
+ std::string VarName =
+ (Twine("llvm.amdgcn.kernel.") + F.getName() + ".lds").str();
+ GlobalVariable *SGV;
+ DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
+ std::tie(SGV, LDSVarsToConstantGEP) =
+ createLDSVariableReplacement(M, VarName, KernelUsedVariables);
+
+ removeFromUsedLists(M, KernelUsedVariables);
+ replaceLDSVariablesWithStruct(
+ M, KernelUsedVariables, SGV, LDSVarsToConstantGEP, [&F](Use &U) {
+ Instruction *I = dyn_cast<Instruction>(U.getUser());
+ return I && I->getFunction() == &F;
+ });
+ Changed = true;
+ }
}
return Changed;
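
The restructured runOnModule now lowers LDS in two phases: variables reachable
from non-kernel functions are packed into a single llvm.amdgcn.module.lds
struct, while variables used by each kernel get a per-kernel
llvm.amdgcn.kernel.<name>.lds struct. A toy model of just the partitioning and
naming (the variable and kernel names below are hypothetical):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::string> ModuleScopeVars = {"lds_a", "lds_b"};
      std::vector<std::pair<std::string, std::vector<std::string>>> Kernels = {
          {"kern0", {"lds_c"}}, {"kern1", {"lds_d", "lds_e"}}};

      if (!ModuleScopeVars.empty())
        std::printf("pack %zu variables into llvm.amdgcn.module.lds\n",
                    ModuleScopeVars.size());
      for (const auto &K : Kernels)
        if (!K.second.empty())
          std::printf("pack %zu variables into llvm.amdgcn.kernel.%s.lds\n",
                      K.second.size(), K.first.c_str());
    }
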
@@ -212,16 +293,18 @@ private:
return Changed;
}
- bool processUsedLDS(CallGraph const &CG, Module &M,
- std::vector<GlobalVariable *> const &LDSVarsToTransform,
- Function *F = nullptr) {
+ std::tuple<GlobalVariable *, DenseMap<GlobalVariable *, Constant *>>
+ createLDSVariableReplacement(
+ Module &M, std::string VarName,
+ std::vector<GlobalVariable *> const &LDSVarsToTransform) {
+ // Create a struct instance containing LDSVarsToTransform and a map from those
+ // variables to their constant GEPs into the struct.
+ // Padding variables may be introduced to meet alignment requirements. No
+ // aliasing metadata is useful for these as they have no uses; they are erased
+ // before returning.
+
LLVMContext &Ctx = M.getContext();
const DataLayout &DL = M.getDataLayout();
-
- if (LDSVarsToTransform.empty()) {
- // No variables to rewrite, no changes made.
- return false;
- }
+ assert(!LDSVarsToTransform.empty());
SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
LayoutFields.reserve(LDSVarsToTransform.size());
@@ -234,9 +317,10 @@ private:
performOptimizedStructLayout(LayoutFields);
std::vector<GlobalVariable *> LocalVars;
+ BitVector IsPaddingField;
LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
+ IsPaddingField.reserve(LDSVarsToTransform.size());
{
- // This usually won't need to insert any padding, perhaps avoid the alloc
uint64_t CurrentOffset = 0;
for (size_t I = 0; I < LayoutFields.size(); I++) {
GlobalVariable *FGV = static_cast<GlobalVariable *>(
@@ -256,10 +340,12 @@ private:
M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
"", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
false));
+ IsPaddingField.push_back(true);
CurrentOffset += Padding;
}
LocalVars.push_back(FGV);
+ IsPaddingField.push_back(false);
CurrentOffset += LayoutFields[I].Size;
}
}
@@ -270,9 +356,6 @@ private:
LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
[](const GlobalVariable *V) -> Type * { return V->getValueType(); });
- std::string VarName(
- F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
- : "llvm.amdgcn.module.lds");
StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");
Align StructAlign =
@@ -283,62 +366,65 @@ private:
VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
false);
SGV->setAlignment(StructAlign);
- if (!F) {
- appendToCompilerUsed(
- M, {static_cast<GlobalValue *>(
- ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
+
+ DenseMap<GlobalVariable *, Constant *> Map;
+ Type *I32 = Type::getInt32Ty(Ctx);
+ for (size_t I = 0; I < LocalVars.size(); I++) {
+ GlobalVariable *GV = LocalVars[I];
+ Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
+ Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
+ if (IsPaddingField[I]) {
+ assert(GV->use_empty());
+ GV->eraseFromParent();
+ } else {
+ Map[GV] = GEP;
+ }
}
+ assert(Map.size() == LDSVarsToTransform.size());
+ return {SGV, std::move(Map)};
+ }
- // The verifier rejects used lists containing an inttoptr of a constant
- // so remove the variables from these lists before replaceAllUsesWith
- removeFromUsedLists(M, LocalVars);
+ template <typename PredicateTy>
+ void replaceLDSVariablesWithStruct(
+ Module &M, std::vector<GlobalVariable *> const &LDSVarsToTransform,
+ GlobalVariable *SGV,
+ DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP,
+ PredicateTy Predicate) {
+ LLVMContext &Ctx = M.getContext();
+ const DataLayout &DL = M.getDataLayout();
// Create alias.scope and their lists. Each field in the new structure
// does not alias with all other fields.
SmallVector<MDNode *> AliasScopes;
SmallVector<Metadata *> NoAliasList;
- if (LocalVars.size() > 1) {
+ const size_t NumberVars = LDSVarsToTransform.size();
+ if (NumberVars > 1) {
MDBuilder MDB(Ctx);
- AliasScopes.reserve(LocalVars.size());
+ AliasScopes.reserve(NumberVars);
MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
- for (size_t I = 0; I < LocalVars.size(); I++) {
+ for (size_t I = 0; I < NumberVars; I++) {
MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
AliasScopes.push_back(Scope);
}
NoAliasList.append(&AliasScopes[1], AliasScopes.end());
}
- // Replace uses of ith variable with a constantexpr to the ith field of the
- // instance that will be allocated by AMDGPUMachineFunction
- Type *I32 = Type::getInt32Ty(Ctx);
- for (size_t I = 0; I < LocalVars.size(); I++) {
- GlobalVariable *GV = LocalVars[I];
- Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
- Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
- if (F) {
- // Replace all constant uses with instructions if they belong to the
- // current kernel.
- for (User *U : make_early_inc_range(GV->users())) {
- if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
- AMDGPU::replaceConstantUsesInFunction(C, F);
- }
-
- GV->removeDeadConstantUsers();
+ // Replace uses of ith variable with a constantexpr to the corresponding
+ // field of the instance that will be allocated by AMDGPUMachineFunction
+ for (size_t I = 0; I < NumberVars; I++) {
+ GlobalVariable *GV = LDSVarsToTransform[I];
+ Constant *GEP = LDSVarsToConstantGEP[GV];
- GV->replaceUsesWithIf(GEP, [F](Use &U) {
- Instruction *I = dyn_cast<Instruction>(U.getUser());
- return I && I->getFunction() == F;
- });
- } else {
- GV->replaceAllUsesWith(GEP);
- }
+ GV->replaceUsesWithIf(GEP, Predicate);
if (GV->use_empty()) {
GV->eraseFromParent();
}
- uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
- Align A = commonAlignment(StructAlign, Off);
+ APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
+ GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
+ uint64_t Offset = APOff.getZExtValue();
+
+ Align A = commonAlignment(SGV->getAlign().valueOrOne(), Offset);
if (I)
NoAliasList[I - 1] = AliasScopes[I - 1];
@@ -349,32 +435,6 @@ private:
refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
}
-
- // This ensures the variable is allocated when called functions access it.
- // It also lets other passes, specifically PromoteAlloca, accurately
- // calculate how much LDS will be used by the kernel after lowering.
- if (!F) {
- IRBuilder<> Builder(Ctx);
- for (Function &Func : M.functions()) {
- if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
- const CallGraphNode *N = CG[&Func];
- const bool CalleesRequireModuleLDS = N->size() > 0;
-
- if (CalleesRequireModuleLDS) {
- // If a function this kernel might call requires module LDS,
- // annotate the kernel to let later passes know it will allocate
- // this structure, even if not apparent from the IR.
- markUsedByKernel(Builder, &Func, SGV);
- } else {
- // However if we are certain this kernel cannot call a function that
- // requires module LDS, annotate the kernel so the backend can elide
- // the allocation without repeating callgraph walks.
- Func.addFnAttr("amdgpu-elide-module-lds");
- }
- }
- }
- }
- return true;
}
void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMIRFormatter.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMIRFormatter.h
index 753f7edc9385..98b5031071cf 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMIRFormatter.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUMIRFormatter.h
@@ -29,7 +29,7 @@ public:
virtual ~AMDGPUMIRFormatter() = default;
/// Implement target specific parsing of target custom pseudo source value.
- virtual bool
+ bool
parseCustomPseudoSourceValue(StringRef Src, MachineFunction &MF,
PerFunctionMIParsingState &PFS,
const PseudoSourceValue *&PSV,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
index bfe2e9b66ed4..98e9907068f2 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -191,8 +191,8 @@ public:
report_fatal_error("Invalid rule identifier");
}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool AMDGPUPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 04da14cc4916..859deae86f35 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -9,6 +9,18 @@
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
+///
+/// This pass will apply multiple scheduling stages to the same function.
+/// Regions are first recorded in GCNScheduleDAGMILive::schedule. The actual
+/// entry point for the scheduling of those regions is
+/// GCNScheduleDAGMILive::runSchedStages.
+
+/// Generally, the reason for having multiple scheduling stages is to account
+/// for the kernel-wide effect of register usage on occupancy. Usually, only a
+/// few scheduling regions will have register pressure high enough to limit
+/// occupancy for the kernel, so constraints can be relaxed to improve ILP in
+/// other regions.
+///
//===----------------------------------------------------------------------===//
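
The header comment refers to register usage limiting occupancy for the whole
kernel. As a rough model only (assuming 256 VGPRs per SIMD and a 10-wave cap;
the real limits vary by subtarget and are queried from GCNSubtarget), the wave
count is bounded by how many per-wave register allocations fit:

    #include <algorithm>
    #include <cassert>

    unsigned occupancyForVGPRs(unsigned VGPRsPerWave) {
      const unsigned TotalVGPRs = 256, MaxWaves = 10; // assumed, see note above
      if (VGPRsPerWave == 0)
        return MaxWaves;
      return std::min(MaxWaves, TotalVGPRs / VGPRsPerWave);
    }

    int main() {
      assert(occupancyForVGPRs(24) == 10);  // low pressure: capped by MaxWaves
      assert(occupancyForVGPRs(64) == 4);   // one high-RP region limits all waves
      assert(occupancyForVGPRs(128) == 2);
    }
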
#include "GCNSchedStrategy.h"
@@ -20,9 +32,9 @@
using namespace llvm;
GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
- const MachineSchedContext *C) :
- GenericScheduler(C), TargetOccupancy(0), HasClusteredNodes(false),
- HasExcessPressure(false), MF(nullptr) { }
+ const MachineSchedContext *C)
+ : GenericScheduler(C), TargetOccupancy(0), MF(nullptr),
+ HasClusteredNodes(false), HasExcessPressure(false) {}
void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
GenericScheduler::initialize(DAG);
@@ -302,210 +314,30 @@ SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
return SU;
}
-GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
- std::unique_ptr<MachineSchedStrategy> S) :
- ScheduleDAGMILive(C, std::move(S)),
- ST(MF.getSubtarget<GCNSubtarget>()),
- MFI(*MF.getInfo<SIMachineFunctionInfo>()),
- StartingOccupancy(MFI.getOccupancy()),
- MinOccupancy(StartingOccupancy), Stage(Collect), RegionIdx(0) {
+GCNScheduleDAGMILive::GCNScheduleDAGMILive(
+ MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S)
+ : ScheduleDAGMILive(C, std::move(S)), ST(MF.getSubtarget<GCNSubtarget>()),
+ MFI(*MF.getInfo<SIMachineFunctionInfo>()),
+ StartingOccupancy(MFI.getOccupancy()), MinOccupancy(StartingOccupancy) {
LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}
void GCNScheduleDAGMILive::schedule() {
- if (Stage == Collect) {
- // Just record regions at the first pass.
- Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
- return;
- }
-
- std::vector<MachineInstr*> Unsched;
- Unsched.reserve(NumRegionInstrs);
- for (auto &I : *this) {
- Unsched.push_back(&I);
- }
-
- GCNRegPressure PressureBefore;
- if (LIS) {
- PressureBefore = Pressure[RegionIdx];
-
- LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
- GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
- dbgs() << "Region live-in pressure: ";
- llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
- dbgs() << "Region register pressure: ";
- PressureBefore.print(dbgs()));
- }
-
- GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
- // Set HasClusteredNodes to true for late stages where we have already
- // collected it. That way pickNode() will not scan SDep's when not needed.
- S.HasClusteredNodes = Stage > InitialSchedule;
- S.HasExcessPressure = false;
- ScheduleDAGMILive::schedule();
- Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
- RescheduleRegions[RegionIdx] = false;
- if (Stage == InitialSchedule && S.HasClusteredNodes)
- RegionsWithClusters[RegionIdx] = true;
- if (S.HasExcessPressure)
- RegionsWithHighRP[RegionIdx] = true;
-
- if (!LIS)
- return;
-
- // Check the results of scheduling.
- auto PressureAfter = getRealRegPressure();
-
- LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
- PressureAfter.print(dbgs()));
-
- if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
- PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) {
- Pressure[RegionIdx] = PressureAfter;
- RegionsWithMinOcc[RegionIdx] =
- PressureAfter.getOccupancy(ST) == MinOccupancy;
-
- LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
- return;
- }
-
- unsigned WavesAfter =
- std::min(S.TargetOccupancy, PressureAfter.getOccupancy(ST));
- unsigned WavesBefore =
- std::min(S.TargetOccupancy, PressureBefore.getOccupancy(ST));
- LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
- << ", after " << WavesAfter << ".\n");
-
- // We may not be able to keep the current target occupancy because of the just
- // scheduled region. We might still be able to revert scheduling if the
- // occupancy before was higher, or if the current schedule has register
- // pressure higher than the excess limits which could lead to more spilling.
- unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
-
- // Allow memory bound functions to drop to 4 waves if not limited by an
- // attribute.
- if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
- WavesAfter >= MFI.getMinAllowedOccupancy()) {
- LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
- << MFI.getMinAllowedOccupancy() << " waves\n");
- NewOccupancy = WavesAfter;
- }
-
- if (NewOccupancy < MinOccupancy) {
- MinOccupancy = NewOccupancy;
- MFI.limitOccupancy(MinOccupancy);
- RegionsWithMinOcc.reset();
- LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
- << MinOccupancy << ".\n");
- }
-
- unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
- unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
- if (PressureAfter.getVGPRNum(false) > MaxVGPRs ||
- PressureAfter.getAGPRNum() > MaxVGPRs ||
- PressureAfter.getSGPRNum() > MaxSGPRs) {
- RescheduleRegions[RegionIdx] = true;
- RegionsWithHighRP[RegionIdx] = true;
- }
-
- // If this condition is true, then either the occupancy before and after
- // scheduling is the same, or we are allowing the occupancy to drop because
- // the function is memory bound. Even if we are OK with the current occupancy,
- // we still need to verify that we will not introduce any extra chance of
- // spilling.
- if (WavesAfter >= MinOccupancy) {
- if (Stage == UnclusteredReschedule &&
- !PressureAfter.less(ST, PressureBefore)) {
- LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
- } else if (WavesAfter > MFI.getMinWavesPerEU() ||
- PressureAfter.less(ST, PressureBefore) ||
- !RescheduleRegions[RegionIdx]) {
- Pressure[RegionIdx] = PressureAfter;
- RegionsWithMinOcc[RegionIdx] =
- PressureAfter.getOccupancy(ST) == MinOccupancy;
- if (!RegionsWithClusters[RegionIdx] &&
- (Stage + 1) == UnclusteredReschedule)
- RescheduleRegions[RegionIdx] = false;
- return;
- } else {
- LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
- }
- }
-
- RegionsWithMinOcc[RegionIdx] =
- PressureBefore.getOccupancy(ST) == MinOccupancy;
- LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
- RescheduleRegions[RegionIdx] = RegionsWithClusters[RegionIdx] ||
- (Stage + 1) != UnclusteredReschedule;
- RegionEnd = RegionBegin;
- int SkippedDebugInstr = 0;
- for (MachineInstr *MI : Unsched) {
- if (MI->isDebugInstr()) {
- ++SkippedDebugInstr;
- continue;
- }
-
- if (MI->getIterator() != RegionEnd) {
- BB->remove(MI);
- BB->insert(RegionEnd, MI);
- if (!MI->isDebugInstr())
- LIS->handleMove(*MI, true);
- }
- // Reset read-undef flags and update them later.
- for (auto &Op : MI->operands())
- if (Op.isReg() && Op.isDef())
- Op.setIsUndef(false);
- RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
- if (!MI->isDebugInstr()) {
- if (ShouldTrackLaneMasks) {
- // Adjust liveness and add missing dead+read-undef flags.
- SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
- RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
- } else {
- // Adjust for missing dead-def flags.
- RegOpers.detectDeadDefs(*MI, *LIS);
- }
- }
- RegionEnd = MI->getIterator();
- ++RegionEnd;
- LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
- }
-
- // After reverting schedule, debug instrs will now be at the end of the block
- // and RegionEnd will point to the first debug instr. Increment RegionEnd
- // pass debug instrs to the actual end of the scheduling region.
- while (SkippedDebugInstr-- > 0)
- ++RegionEnd;
-
- // If Unsched.front() instruction is a debug instruction, this will actually
- // shrink the region since we moved all debug instructions to the end of the
- // block. Find the first instruction that is not a debug instruction.
- RegionBegin = Unsched.front()->getIterator();
- if (RegionBegin->isDebugInstr()) {
- for (MachineInstr *MI : Unsched) {
- if (MI->isDebugInstr())
- continue;
- RegionBegin = MI->getIterator();
- break;
- }
- }
-
- // Then move the debug instructions back into their correct place and set
- // RegionBegin and RegionEnd if needed.
- placeDebugValues();
-
- Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
+ // Collect all scheduling regions. The actual scheduling is performed in
+ // GCNScheduleDAGMILive::finalizeSchedule.
+ Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
}
-GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
+GCNRegPressure
+GCNScheduleDAGMILive::getRealRegPressure(unsigned RegionIdx) const {
GCNDownwardRPTracker RPTracker(*LIS);
RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
return RPTracker.moveMaxPressure();
}
-void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
+void GCNScheduleDAGMILive::computeBlockPressure(unsigned RegionIdx,
+ const MachineBasicBlock *MBB) {
GCNDownwardRPTracker RPTracker(*LIS);
// If the block has the only successor then live-ins of that successor are
@@ -542,7 +374,7 @@ void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
RPTracker.reset(*I, &LRS);
}
- for ( ; ; ) {
+ for (;;) {
I = RPTracker.getNext();
if (Regions[CurRegion].first == I || NonDbgMI == I) {
@@ -588,8 +420,9 @@ GCNScheduleDAGMILive::getBBLiveInMap() const {
}
void GCNScheduleDAGMILive::finalizeSchedule() {
- LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
-
+ // Start actual scheduling here. This function is called by the base
+ // MachineScheduler after all regions have been recorded by
+ // GCNScheduleDAGMILive::schedule().
LiveIns.resize(Regions.size());
Pressure.resize(Regions.size());
RescheduleRegions.resize(Regions.size());
@@ -601,142 +434,470 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
RegionsWithHighRP.reset();
RegionsWithMinOcc.reset();
+ runSchedStages();
+}
+
+void GCNScheduleDAGMILive::runSchedStages() {
+ LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
+ InitialScheduleStage S0(GCNSchedStageID::InitialSchedule, *this);
+ UnclusteredRescheduleStage S1(GCNSchedStageID::UnclusteredReschedule, *this);
+ ClusteredLowOccStage S2(GCNSchedStageID::ClusteredLowOccupancyReschedule,
+ *this);
+ PreRARematStage S3(GCNSchedStageID::PreRARematerialize, *this);
+ GCNSchedStage *SchedStages[] = {&S0, &S1, &S2, &S3};
+
if (!Regions.empty())
BBLiveInMap = getBBLiveInMap();
- std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;
+ for (auto *Stage : SchedStages) {
+ if (!Stage->initGCNSchedStage())
+ continue;
- do {
- Stage++;
- RegionIdx = 0;
- MachineBasicBlock *MBB = nullptr;
+ for (auto Region : Regions) {
+ RegionBegin = Region.first;
+ RegionEnd = Region.second;
+ // Setup for scheduling the region and check whether it should be skipped.
+ if (!Stage->initGCNRegion()) {
+ Stage->advanceRegion();
+ exitRegion();
+ continue;
+ }
- if (Stage > InitialSchedule) {
- if (!LIS)
- break;
+ ScheduleDAGMILive::schedule();
+ Stage->finalizeGCNRegion();
+ }
- // Retry function scheduling if we found resulting occupancy and it is
- // lower than used for first pass scheduling. This will give more freedom
- // to schedule low register pressure blocks.
- // Code is partially copied from MachineSchedulerBase::scheduleRegions().
+ Stage->finalizeGCNSchedStage();
+ }
+}
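
runSchedStages above replaces the old Stage enum and do-while loop with
explicit stage objects. Stripped of the scheduler details, the control flow
reduces to the nested loop below (the names are simplified stand-ins for the
GCNSchedStage hooks, not the real interfaces):

    #include <cstddef>
    #include <vector>

    struct Stage {
      virtual ~Stage() = default;
      virtual bool initStage() { return true; }        // e.g. skip if nothing to redo
      virtual bool initRegion(size_t) { return true; } // e.g. skip unchanged regions
      virtual void finalizeRegion(size_t) {}
      virtual void finalizeStage() {}
    };

    void runStages(std::vector<Stage *> &Stages, size_t NumRegions,
                   void (*ScheduleRegion)(size_t)) {
      for (Stage *S : Stages) {
        if (!S->initStage())
          continue;
        for (size_t R = 0; R < NumRegions; ++R) {
          if (!S->initRegion(R)) // stage-specific filter, region is skipped
            continue;
          ScheduleRegion(R);     // ScheduleDAGMILive::schedule() in the real code
          S->finalizeRegion(R);
        }
        S->finalizeStage();
      }
    }

    int main() {
      Stage Default; // schedules every region unconditionally
      std::vector<Stage *> Stages = {&Default};
      runStages(Stages, 3, +[](size_t) {});
    }
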
- if (Stage == UnclusteredReschedule) {
- if (RescheduleRegions.none())
- continue;
- LLVM_DEBUG(dbgs() <<
- "Retrying function scheduling without clustering.\n");
- }
+#ifndef NDEBUG
+raw_ostream &llvm::operator<<(raw_ostream &OS, const GCNSchedStageID &StageID) {
+ switch (StageID) {
+ case GCNSchedStageID::InitialSchedule:
+ OS << "Initial Schedule";
+ break;
+ case GCNSchedStageID::UnclusteredReschedule:
+ OS << "Unclustered Reschedule";
+ break;
+ case GCNSchedStageID::ClusteredLowOccupancyReschedule:
+ OS << "Clustered Low Occupancy Reschedule";
+ break;
+ case GCNSchedStageID::PreRARematerialize:
+ OS << "Pre-RA Rematerialize";
+ break;
+ }
+ return OS;
+}
+#endif
- if (Stage == ClusteredLowOccupancyReschedule) {
- if (StartingOccupancy <= MinOccupancy)
- break;
+GCNSchedStage::GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+ : DAG(DAG), S(static_cast<GCNMaxOccupancySchedStrategy &>(*DAG.SchedImpl)),
+ MF(DAG.MF), MFI(DAG.MFI), ST(DAG.ST), StageID(StageID) {}
- LLVM_DEBUG(
- dbgs()
- << "Retrying function scheduling with lowest recorded occupancy "
- << MinOccupancy << ".\n");
- }
+bool GCNSchedStage::initGCNSchedStage() {
+ if (!DAG.LIS)
+ return false;
- if (Stage == PreRARematerialize) {
- if (RegionsWithMinOcc.none() || Regions.size() == 1)
- break;
-
- const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- // Check maximum occupancy
- if (ST.computeOccupancy(MF.getFunction(), MFI.getLDSSize()) ==
- MinOccupancy)
- break;
-
- // FIXME: This pass will invalidate cached MBBLiveIns for regions
- // inbetween the defs and region we sinked the def to. Cached pressure
- // for regions where a def is sinked from will also be invalidated. Will
- // need to be fixed if there is another pass after this pass.
- static_assert(LastStage == PreRARematerialize,
- "Passes after PreRARematerialize are not supported");
-
- collectRematerializableInstructions();
- if (RematerializableInsts.empty() || !sinkTriviallyRematInsts(ST, TII))
- break;
-
- LLVM_DEBUG(
- dbgs() << "Retrying function scheduling with improved occupancy of "
- << MinOccupancy << " from rematerializing\n");
- }
- }
+ LLVM_DEBUG(dbgs() << "Starting scheduling stage: " << StageID << "\n");
+ return true;
+}
- if (Stage == UnclusteredReschedule)
- SavedMutations.swap(Mutations);
+bool UnclusteredRescheduleStage::initGCNSchedStage() {
+ if (!GCNSchedStage::initGCNSchedStage())
+ return false;
- for (auto Region : Regions) {
- if (((Stage == UnclusteredReschedule || Stage == PreRARematerialize) &&
- !RescheduleRegions[RegionIdx]) ||
- (Stage == ClusteredLowOccupancyReschedule &&
- !RegionsWithClusters[RegionIdx] && !RegionsWithHighRP[RegionIdx])) {
+ if (DAG.RescheduleRegions.none())
+ return false;
- ++RegionIdx;
- continue;
- }
+ SavedMutations.swap(DAG.Mutations);
- RegionBegin = Region.first;
- RegionEnd = Region.second;
+ LLVM_DEBUG(dbgs() << "Retrying function scheduling without clustering.\n");
+ return true;
+}
- if (RegionBegin->getParent() != MBB) {
- if (MBB) finishBlock();
- MBB = RegionBegin->getParent();
- startBlock(MBB);
- if (Stage == InitialSchedule)
- computeBlockPressure(MBB);
- }
+bool ClusteredLowOccStage::initGCNSchedStage() {
+ if (!GCNSchedStage::initGCNSchedStage())
+ return false;
- unsigned NumRegionInstrs = std::distance(begin(), end());
- enterRegion(MBB, begin(), end(), NumRegionInstrs);
+ // Don't bother trying to improve ILP in lower RP regions if occupancy has not
+ // been dropped. All regions will have already been scheduled with the ideal
+ // occupancy targets.
+ if (DAG.StartingOccupancy <= DAG.MinOccupancy)
+ return false;
- // Skip empty scheduling regions (0 or 1 schedulable instructions).
- if (begin() == end() || begin() == std::prev(end())) {
- exitRegion();
- ++RegionIdx;
- continue;
- }
+ LLVM_DEBUG(
+ dbgs() << "Retrying function scheduling with lowest recorded occupancy "
+ << DAG.MinOccupancy << ".\n");
+ return true;
+}
- LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
- LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
- << MBB->getName() << "\n From: " << *begin()
- << " To: ";
- if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
- else dbgs() << "End";
- dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+bool PreRARematStage::initGCNSchedStage() {
+ if (!GCNSchedStage::initGCNSchedStage())
+ return false;
+
+ if (DAG.RegionsWithMinOcc.none() || DAG.Regions.size() == 1)
+ return false;
- schedule();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ // Check maximum occupancy
+ if (ST.computeOccupancy(MF.getFunction(), MFI.getLDSSize()) ==
+ DAG.MinOccupancy)
+ return false;
+
+ // FIXME: This pass will invalidate cached MBBLiveIns for regions
+ // in between the defs and the region we sank the def to. Cached pressure
+ // for regions where a def is sunk from will also be invalidated. Will
+ // need to be fixed if there is another pass after this pass.
+
+ collectRematerializableInstructions();
+ if (RematerializableInsts.empty() || !sinkTriviallyRematInsts(ST, TII))
+ return false;
- exitRegion();
- ++RegionIdx;
+ LLVM_DEBUG(
+ dbgs() << "Retrying function scheduling with improved occupancy of "
+ << DAG.MinOccupancy << " from rematerializing\n");
+ return true;
+}
+
+void GCNSchedStage::finalizeGCNSchedStage() {
+ DAG.finishBlock();
+ LLVM_DEBUG(dbgs() << "Ending scheduling stage: " << StageID << "\n");
+}
+
+void UnclusteredRescheduleStage::finalizeGCNSchedStage() {
+ SavedMutations.swap(DAG.Mutations);
+
+ GCNSchedStage::finalizeGCNSchedStage();
+}
+
+bool GCNSchedStage::initGCNRegion() {
+ // Check whether this new region is also a new block.
+ if (DAG.RegionBegin->getParent() != CurrentMBB)
+ setupNewBlock();
+
+ unsigned NumRegionInstrs = std::distance(DAG.begin(), DAG.end());
+ DAG.enterRegion(CurrentMBB, DAG.begin(), DAG.end(), NumRegionInstrs);
+
+ // Skip empty scheduling regions (0 or 1 schedulable instructions).
+ if (DAG.begin() == DAG.end() || DAG.begin() == std::prev(DAG.end()))
+ return false;
+
+ LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
+ LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*CurrentMBB)
+ << " " << CurrentMBB->getName()
+ << "\n From: " << *DAG.begin() << " To: ";
+ if (DAG.RegionEnd != CurrentMBB->end()) dbgs() << *DAG.RegionEnd;
+ else dbgs() << "End";
+ dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
+
+ // Save original instruction order before scheduling for possible revert.
+ Unsched.clear();
+ Unsched.reserve(DAG.NumRegionInstrs);
+ for (auto &I : DAG)
+ Unsched.push_back(&I);
+
+ PressureBefore = DAG.Pressure[RegionIdx];
+
+ LLVM_DEBUG(
+ dbgs() << "Pressure before scheduling:\nRegion live-ins:";
+ GCNRPTracker::printLiveRegs(dbgs(), DAG.LiveIns[RegionIdx], DAG.MRI);
+ dbgs() << "Region live-in pressure: ";
+ llvm::getRegPressure(DAG.MRI, DAG.LiveIns[RegionIdx]).print(dbgs());
+ dbgs() << "Region register pressure: "; PressureBefore.print(dbgs()));
+
+ // Set HasClusteredNodes to true for late stages where we have already
+ // collected it. That way pickNode() will not scan SDep's when not needed.
+ S.HasClusteredNodes = StageID > GCNSchedStageID::InitialSchedule;
+ S.HasExcessPressure = false;
+
+ return true;
+}
+
+bool UnclusteredRescheduleStage::initGCNRegion() {
+ if (!DAG.RescheduleRegions[RegionIdx])
+ return false;
+
+ return GCNSchedStage::initGCNRegion();
+}
+
+bool ClusteredLowOccStage::initGCNRegion() {
+ // We may need to reschedule this region if it doesn't have clusters so it
+ // wasn't rescheduled in the last stage, or if we found it was testing
+ // critical register pressure limits in the unclustered reschedule stage. The
+ // latter is because we may not have been able to raise the min occupancy in
+ // the previous stage so the region may be overly constrained even if it was
+ // already rescheduled.
+ if (!DAG.RegionsWithClusters[RegionIdx] && !DAG.RegionsWithHighRP[RegionIdx])
+ return false;
+
+ return GCNSchedStage::initGCNRegion();
+}
+
+bool PreRARematStage::initGCNRegion() {
+ if (!DAG.RescheduleRegions[RegionIdx])
+ return false;
+
+ return GCNSchedStage::initGCNRegion();
+}
+
+void GCNSchedStage::setupNewBlock() {
+ if (CurrentMBB)
+ DAG.finishBlock();
+
+ CurrentMBB = DAG.RegionBegin->getParent();
+ DAG.startBlock(CurrentMBB);
+ // Get real RP for the region if it hasn't been calculated before. After the
+ // initial schedule stage real RP will be collected after scheduling.
+ if (StageID == GCNSchedStageID::InitialSchedule)
+ DAG.computeBlockPressure(RegionIdx, CurrentMBB);
+}
+
+void GCNSchedStage::finalizeGCNRegion() {
+ DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
+ DAG.RescheduleRegions[RegionIdx] = false;
+ if (S.HasExcessPressure)
+ DAG.RegionsWithHighRP[RegionIdx] = true;
+
+ // Revert scheduling if we have dropped occupancy or there is some other
+ // reason that the original schedule is better.
+ checkScheduling();
+
+ DAG.exitRegion();
+ RegionIdx++;
+}
+
+void InitialScheduleStage::finalizeGCNRegion() {
+ // Record which regions have clustered nodes for the next unclustered
+ // reschedule stage.
+ assert(nextStage(StageID) == GCNSchedStageID::UnclusteredReschedule);
+ if (S.HasClusteredNodes)
+ DAG.RegionsWithClusters[RegionIdx] = true;
+
+ GCNSchedStage::finalizeGCNRegion();
+}
+
+void GCNSchedStage::checkScheduling() {
+ // Check the results of scheduling.
+ PressureAfter = DAG.getRealRegPressure(RegionIdx);
+ LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
+ PressureAfter.print(dbgs()));
+
+ if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
+ PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) {
+ DAG.Pressure[RegionIdx] = PressureAfter;
+ DAG.RegionsWithMinOcc[RegionIdx] =
+ PressureAfter.getOccupancy(ST) == DAG.MinOccupancy;
+
+ // Early out if we have achieved the occupancy target.
+ LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
+ return;
+ }
+
+ unsigned WavesAfter =
+ std::min(S.getTargetOccupancy(), PressureAfter.getOccupancy(ST));
+ unsigned WavesBefore =
+ std::min(S.getTargetOccupancy(), PressureBefore.getOccupancy(ST));
+ LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
+ << ", after " << WavesAfter << ".\n");
+
+ // We may not be able to keep the current target occupancy because of the just
+ // scheduled region. We might still be able to revert scheduling if the
+ // occupancy before was higher, or if the current schedule has register
+ // pressure higher than the excess limits which could lead to more spilling.
+ unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
+
+ // Allow memory bound functions to drop to 4 waves if not limited by an
+ // attribute.
+ if (WavesAfter < WavesBefore && WavesAfter < DAG.MinOccupancy &&
+ WavesAfter >= MFI.getMinAllowedOccupancy()) {
+ LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
+ << MFI.getMinAllowedOccupancy() << " waves\n");
+ NewOccupancy = WavesAfter;
+ }
+
+ if (NewOccupancy < DAG.MinOccupancy) {
+ DAG.MinOccupancy = NewOccupancy;
+ MFI.limitOccupancy(DAG.MinOccupancy);
+ DAG.RegionsWithMinOcc.reset();
+ LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
+ << DAG.MinOccupancy << ".\n");
+ }
+
+ unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
+ unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
+ if (PressureAfter.getVGPRNum(false) > MaxVGPRs ||
+ PressureAfter.getAGPRNum() > MaxVGPRs ||
+ PressureAfter.getSGPRNum() > MaxSGPRs) {
+ DAG.RescheduleRegions[RegionIdx] = true;
+ DAG.RegionsWithHighRP[RegionIdx] = true;
+ }
+
+ // Revert if this region's schedule would cause a drop in occupancy or
+ // spilling.
+ if (shouldRevertScheduling(WavesAfter)) {
+ revertScheduling();
+ } else {
+ DAG.Pressure[RegionIdx] = PressureAfter;
+ DAG.RegionsWithMinOcc[RegionIdx] =
+ PressureAfter.getOccupancy(ST) == DAG.MinOccupancy;
+ }
+}
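
The occupancy bookkeeping in checkScheduling boils down to: take the better of
the before/after wave counts, let a memory-bound function drop as far as its
minimum allowed occupancy, and lower the recorded function-wide minimum when
needed. A distilled version of just that arithmetic (a sketch mirroring the
logic above, not the actual class):

    #include <algorithm>
    #include <cassert>

    struct OccUpdate {
      unsigned NewMinOccupancy;
      bool Lowered;
    };

    OccUpdate updateMinOccupancy(unsigned WavesBefore, unsigned WavesAfter,
                                 unsigned MinOccupancy,
                                 unsigned MinAllowedOccupancy) {
      unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
      // Memory-bound functions may drop to their minimum allowed occupancy.
      if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
          WavesAfter >= MinAllowedOccupancy)
        NewOccupancy = WavesAfter;
      if (NewOccupancy < MinOccupancy)
        return {NewOccupancy, true};
      return {MinOccupancy, false};
    }

    int main() {
      // A region scheduled at 6 waves while the function minimum was 8.
      assert(updateMinOccupancy(8, 6, 8, 4).NewMinOccupancy == 6);
      // Occupancy unchanged or improved: the recorded minimum stays put.
      assert(!updateMinOccupancy(6, 8, 6, 4).Lowered);
    }
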
+
+bool GCNSchedStage::shouldRevertScheduling(unsigned WavesAfter) {
+ if (WavesAfter < DAG.MinOccupancy)
+ return true;
+
+ return false;
+}
+
+bool InitialScheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
+ if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
+ return true;
+
+ if (mayCauseSpilling(WavesAfter))
+ return true;
+
+ assert(nextStage(StageID) == GCNSchedStageID::UnclusteredReschedule);
+ // Don't reschedule the region in the next stage if it doesn't have clusters.
+ if (!DAG.RegionsWithClusters[RegionIdx])
+ DAG.RescheduleRegions[RegionIdx] = false;
+
+ return false;
+}
+
+bool UnclusteredRescheduleStage::shouldRevertScheduling(unsigned WavesAfter) {
+ if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
+ return true;
+
+ // If RP is not reduced in the unclustered reschedule stage, revert to the old
+ // schedule.
+ if (!PressureAfter.less(ST, PressureBefore)) {
+ LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
+ return true;
+ }
+
+ return false;
+}
+
+bool ClusteredLowOccStage::shouldRevertScheduling(unsigned WavesAfter) {
+ if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
+ return true;
+
+ if (mayCauseSpilling(WavesAfter))
+ return true;
+
+ return false;
+}
+
+bool PreRARematStage::shouldRevertScheduling(unsigned WavesAfter) {
+ if (GCNSchedStage::shouldRevertScheduling(WavesAfter))
+ return true;
+
+ if (mayCauseSpilling(WavesAfter))
+ return true;
+
+ return false;
+}
+
+bool GCNSchedStage::mayCauseSpilling(unsigned WavesAfter) {
+ if (WavesAfter <= MFI.getMinWavesPerEU() &&
+ !PressureAfter.less(ST, PressureBefore) &&
+ DAG.RescheduleRegions[RegionIdx]) {
+ LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
+ return true;
+ }
+
+ return false;
+}
+
+void GCNSchedStage::revertScheduling() {
+ DAG.RegionsWithMinOcc[RegionIdx] =
+ PressureBefore.getOccupancy(ST) == DAG.MinOccupancy;
+ LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
+ DAG.RescheduleRegions[RegionIdx] =
+ DAG.RegionsWithClusters[RegionIdx] ||
+ (nextStage(StageID)) != GCNSchedStageID::UnclusteredReschedule;
+ DAG.RegionEnd = DAG.RegionBegin;
+ int SkippedDebugInstr = 0;
+ for (MachineInstr *MI : Unsched) {
+ if (MI->isDebugInstr()) {
+ ++SkippedDebugInstr;
+ continue;
+ }
+
+ if (MI->getIterator() != DAG.RegionEnd) {
+ DAG.BB->remove(MI);
+ DAG.BB->insert(DAG.RegionEnd, MI);
+ if (!MI->isDebugInstr())
+ DAG.LIS->handleMove(*MI, true);
+ }
+
+ // Reset read-undef flags and update them later.
+ for (auto &Op : MI->operands())
+ if (Op.isReg() && Op.isDef())
+ Op.setIsUndef(false);
+ RegisterOperands RegOpers;
+ RegOpers.collect(*MI, *DAG.TRI, DAG.MRI, DAG.ShouldTrackLaneMasks, false);
+ if (!MI->isDebugInstr()) {
+ if (DAG.ShouldTrackLaneMasks) {
+ // Adjust liveness and add missing dead+read-undef flags.
+ SlotIndex SlotIdx = DAG.LIS->getInstructionIndex(*MI).getRegSlot();
+ RegOpers.adjustLaneLiveness(*DAG.LIS, DAG.MRI, SlotIdx, MI);
+ } else {
+ // Adjust for missing dead-def flags.
+ RegOpers.detectDeadDefs(*MI, *DAG.LIS);
+ }
}
- finishBlock();
+ DAG.RegionEnd = MI->getIterator();
+ ++DAG.RegionEnd;
+ LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
+ }
+
+ // After reverting schedule, debug instrs will now be at the end of the block
+ // and RegionEnd will point to the first debug instr. Increment RegionEnd
+ // past debug instrs to the actual end of the scheduling region.
+ while (SkippedDebugInstr-- > 0)
+ ++DAG.RegionEnd;
+
+ // If Unsched.front() instruction is a debug instruction, this will actually
+ // shrink the region since we moved all debug instructions to the end of the
+ // block. Find the first instruction that is not a debug instruction.
+ DAG.RegionBegin = Unsched.front()->getIterator();
+ if (DAG.RegionBegin->isDebugInstr()) {
+ for (MachineInstr *MI : Unsched) {
+ if (MI->isDebugInstr())
+ continue;
+ DAG.RegionBegin = MI->getIterator();
+ break;
+ }
+ }
+
+ // Then move the debug instructions back into their correct place and set
+ // RegionBegin and RegionEnd if needed.
+ DAG.placeDebugValues();
- if (Stage == UnclusteredReschedule)
- SavedMutations.swap(Mutations);
- } while (Stage != LastStage);
+ DAG.Regions[RegionIdx] = std::make_pair(DAG.RegionBegin, DAG.RegionEnd);
}
-void GCNScheduleDAGMILive::collectRematerializableInstructions() {
- const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo *>(TRI);
- for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+void PreRARematStage::collectRematerializableInstructions() {
+ const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo *>(DAG.TRI);
+ for (unsigned I = 0, E = DAG.MRI.getNumVirtRegs(); I != E; ++I) {
Register Reg = Register::index2VirtReg(I);
- if (!LIS->hasInterval(Reg))
+ if (!DAG.LIS->hasInterval(Reg))
continue;
// TODO: Handle AGPR and SGPR rematerialization
- if (!SRI->isVGPRClass(MRI.getRegClass(Reg)) || !MRI.hasOneDef(Reg) ||
- !MRI.hasOneNonDBGUse(Reg))
+ if (!SRI->isVGPRClass(DAG.MRI.getRegClass(Reg)) ||
+ !DAG.MRI.hasOneDef(Reg) || !DAG.MRI.hasOneNonDBGUse(Reg))
continue;
- MachineOperand *Op = MRI.getOneDef(Reg);
+ MachineOperand *Op = DAG.MRI.getOneDef(Reg);
MachineInstr *Def = Op->getParent();
if (Op->getSubReg() != 0 || !isTriviallyReMaterializable(*Def))
continue;
- MachineInstr *UseI = &*MRI.use_instr_nodbg_begin(Reg);
+ MachineInstr *UseI = &*DAG.MRI.use_instr_nodbg_begin(Reg);
if (Def->getParent() == UseI->getParent())
continue;
@@ -744,10 +905,10 @@ void GCNScheduleDAGMILive::collectRematerializableInstructions() {
// live-through or used inside regions at MinOccupancy. This means that the
// register must be in the live-in set for the region.
bool AddedToRematList = false;
- for (unsigned I = 0, E = Regions.size(); I != E; ++I) {
- auto It = LiveIns[I].find(Reg);
- if (It != LiveIns[I].end() && !It->second.none()) {
- if (RegionsWithMinOcc[I]) {
+ for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
+ auto It = DAG.LiveIns[I].find(Reg);
+ if (It != DAG.LiveIns[I].end() && !It->second.none()) {
+ if (DAG.RegionsWithMinOcc[I]) {
RematerializableInsts[I][Def] = UseI;
AddedToRematList = true;
}
@@ -762,8 +923,8 @@ void GCNScheduleDAGMILive::collectRematerializableInstructions() {
}
}
-bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
- const TargetInstrInfo *TII) {
+bool PreRARematStage::sinkTriviallyRematInsts(const GCNSubtarget &ST,
+ const TargetInstrInfo *TII) {
// Temporary copies of cached variables we will be modifying and replacing if
// sinking succeeds.
SmallVector<
@@ -772,9 +933,10 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
DenseMap<unsigned, GCNRPTracker::LiveRegSet> NewLiveIns;
DenseMap<unsigned, GCNRegPressure> NewPressure;
BitVector NewRescheduleRegions;
+ LiveIntervals *LIS = DAG.LIS;
- NewRegions.resize(Regions.size());
- NewRescheduleRegions.resize(Regions.size());
+ NewRegions.resize(DAG.Regions.size());
+ NewRescheduleRegions.resize(DAG.Regions.size());
// Collect only regions that have a rematerializable def as a live-in.
SmallSet<unsigned, 16> ImpactedRegions;
@@ -784,16 +946,16 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// Make copies of register pressure and live-ins cache that will be updated
// as we rematerialize.
for (auto Idx : ImpactedRegions) {
- NewPressure[Idx] = Pressure[Idx];
- NewLiveIns[Idx] = LiveIns[Idx];
+ NewPressure[Idx] = DAG.Pressure[Idx];
+ NewLiveIns[Idx] = DAG.LiveIns[Idx];
}
- NewRegions = Regions;
+ NewRegions = DAG.Regions;
NewRescheduleRegions.reset();
DenseMap<MachineInstr *, MachineInstr *> InsertedMIToOldDef;
bool Improved = false;
for (auto I : ImpactedRegions) {
- if (!RegionsWithMinOcc[I])
+ if (!DAG.RegionsWithMinOcc[I])
continue;
Improved = false;
@@ -802,12 +964,12 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// TODO: Handle occupancy drop due to AGPR and SGPR.
// Check if cause of occupancy drop is due to VGPR usage and not SGPR.
- if (ST.getOccupancyWithNumSGPRs(SGPRUsage) == MinOccupancy)
+ if (ST.getOccupancyWithNumSGPRs(SGPRUsage) == DAG.MinOccupancy)
break;
// The occupancy of this region could have been improved by a previous
// iteration's sinking of defs.
- if (NewPressure[I].getOccupancy(ST) > MinOccupancy) {
+ if (NewPressure[I].getOccupancy(ST) > DAG.MinOccupancy) {
NewRescheduleRegions[I] = true;
Improved = true;
continue;
@@ -827,7 +989,7 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
unsigned OptimisticOccupancy = ST.getOccupancyWithNumVGPRs(VGPRsAfterSink);
// If in the most optimistic scenario, we cannot improve occupancy, then do
// not attempt to sink any instructions.
- if (OptimisticOccupancy <= MinOccupancy)
+ if (OptimisticOccupancy <= DAG.MinOccupancy)
break;
unsigned ImproveOccupancy = 0;
@@ -842,7 +1004,7 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// call LiveRangeEdit::allUsesAvailableAt() and
// LiveRangeEdit::canRematerializeAt().
TII->reMaterialize(*InsertPos->getParent(), InsertPos, Reg,
- Def->getOperand(0).getSubReg(), *Def, *TRI);
+ Def->getOperand(0).getSubReg(), *Def, *DAG.TRI);
MachineInstr *NewMI = &*(--InsertPos);
LIS->InsertMachineInstrInMaps(*NewMI);
LIS->removeInterval(Reg);
@@ -851,11 +1013,11 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// Update region boundaries in the scheduling region we sank from since we
// may sink an instruction that was at the beginning or end of its region
- updateRegionBoundaries(NewRegions, Def, /*NewMI =*/nullptr,
- /*Removing =*/true);
+ DAG.updateRegionBoundaries(NewRegions, Def, /*NewMI =*/nullptr,
+ /*Removing =*/true);
  // Update region boundaries in the region we sunk to.
- updateRegionBoundaries(NewRegions, InsertPos, NewMI);
+ DAG.updateRegionBoundaries(NewRegions, InsertPos, NewMI);
LaneBitmask PrevMask = NewLiveIns[I][Reg];
  // FIXME: Also update cached pressure for where the def was sunk from.
@@ -863,9 +1025,9 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// the reg from all regions as a live-in.
for (auto Idx : RematDefToLiveInRegions[Def]) {
NewLiveIns[Idx].erase(Reg);
- if (InsertPos->getParent() != Regions[Idx].first->getParent()) {
+ if (InsertPos->getParent() != DAG.Regions[Idx].first->getParent()) {
// Def is live-through and not used in this block.
- NewPressure[Idx].inc(Reg, PrevMask, LaneBitmask::getNone(), MRI);
+ NewPressure[Idx].inc(Reg, PrevMask, LaneBitmask::getNone(), DAG.MRI);
} else {
// Def is used and rematerialized into this block.
GCNDownwardRPTracker RPT(*LIS);
@@ -879,7 +1041,7 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
SinkedDefs.push_back(Def);
ImproveOccupancy = NewPressure[I].getOccupancy(ST);
- if (ImproveOccupancy > MinOccupancy)
+ if (ImproveOccupancy > DAG.MinOccupancy)
break;
}
@@ -888,7 +1050,7 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
for (auto TrackedIdx : RematDefToLiveInRegions[Def])
RematerializableInsts[TrackedIdx].erase(Def);
- if (ImproveOccupancy <= MinOccupancy)
+ if (ImproveOccupancy <= DAG.MinOccupancy)
break;
NewRescheduleRegions[I] = true;
@@ -917,7 +1079,7 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
MachineInstr *OldMI = Entry.second;
// Remove OldMI from BBLiveInMap since we are sinking it from its MBB.
- BBLiveInMap.erase(OldMI);
+ DAG.BBLiveInMap.erase(OldMI);
// Remove OldMI and update LIS
Register Reg = MI->getOperand(0).getReg();
@@ -929,22 +1091,22 @@ bool GCNScheduleDAGMILive::sinkTriviallyRematInsts(const GCNSubtarget &ST,
// Update live-ins, register pressure, and regions caches.
for (auto Idx : ImpactedRegions) {
- LiveIns[Idx] = NewLiveIns[Idx];
- Pressure[Idx] = NewPressure[Idx];
- MBBLiveIns.erase(Regions[Idx].first->getParent());
+ DAG.LiveIns[Idx] = NewLiveIns[Idx];
+ DAG.Pressure[Idx] = NewPressure[Idx];
+ DAG.MBBLiveIns.erase(DAG.Regions[Idx].first->getParent());
}
- Regions = NewRegions;
- RescheduleRegions = NewRescheduleRegions;
+ DAG.Regions = NewRegions;
+ DAG.RescheduleRegions = NewRescheduleRegions;
SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
- MFI.increaseOccupancy(MF, ++MinOccupancy);
+ MFI.increaseOccupancy(MF, ++DAG.MinOccupancy);
return true;
}
// Copied from MachineLICM
-bool GCNScheduleDAGMILive::isTriviallyReMaterializable(const MachineInstr &MI) {
- if (!TII->isTriviallyReMaterializable(MI))
+bool PreRARematStage::isTriviallyReMaterializable(const MachineInstr &MI) {
+ if (!DAG.TII->isTriviallyReMaterializable(MI))
return false;
for (const MachineOperand &MO : MI.operands())
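
The hunks above mechanically reroute member accesses through the DAG now that the rematerialization logic lives in PreRARematStage instead of GCNScheduleDAGMILive. A minimal sketch of that pattern, with simplified, hypothetical names standing in for the real classes:

#include <vector>

// Hypothetical stand-ins for GCNScheduleDAGMILive and a scheduling stage.
struct SketchDAG {
  unsigned MinOccupancy = 4;           // cached minimal occupancy for the function
  std::vector<bool> RegionsWithMinOcc; // regions currently at MinOccupancy
};

class SketchStage {
  SketchDAG &DAG; // stages share scheduler state through this reference
public:
  explicit SketchStage(SketchDAG &D) : DAG(D) {}
  // Mirrors checks such as "if (!DAG.RegionsWithMinOcc[I]) continue;" above.
  bool shouldVisitRegion(unsigned I) const { return DAG.RegionsWithMinOcc[I]; }
};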
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index c3db849cf81a..7aadf89e0bf7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -28,8 +28,6 @@ class GCNSubtarget;
/// heuristics to determine excess/critical pressure sets. Its goal is to
/// maximize kernel occupancy (i.e. maximum number of waves per simd).
class GCNMaxOccupancySchedStrategy final : public GenericScheduler {
- friend class GCNScheduleDAGMILive;
-
SUnit *pickNodeBidirectional(bool &IsTopNode);
void pickNodeFromQueue(SchedBoundary &Zone, const CandPolicy &ZonePolicy,
@@ -42,15 +40,18 @@ class GCNMaxOccupancySchedStrategy final : public GenericScheduler {
unsigned SGPRPressure, unsigned VGPRPressure);
std::vector<unsigned> Pressure;
+
std::vector<unsigned> MaxPressure;
unsigned SGPRExcessLimit;
+
unsigned VGPRExcessLimit;
- unsigned SGPRCriticalLimit;
- unsigned VGPRCriticalLimit;
unsigned TargetOccupancy;
+ MachineFunction *MF;
+
+public:
// schedule() has seen a clustered memory operation. Set it to false
// before scheduling a region to know whether the region had such clusters.
bool HasClusteredNodes;
@@ -59,28 +60,53 @@ class GCNMaxOccupancySchedStrategy final : public GenericScheduler {
// register pressure for actual scheduling heuristics.
bool HasExcessPressure;
- MachineFunction *MF;
+ unsigned SGPRCriticalLimit;
+
+ unsigned VGPRCriticalLimit;
-public:
GCNMaxOccupancySchedStrategy(const MachineSchedContext *C);
SUnit *pickNode(bool &IsTopNode) override;
void initialize(ScheduleDAGMI *DAG) override;
+ unsigned getTargetOccupancy() { return TargetOccupancy; }
+
void setTargetOccupancy(unsigned Occ) { TargetOccupancy = Occ; }
};
-class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
+enum class GCNSchedStageID : unsigned {
+ InitialSchedule = 0,
+ UnclusteredReschedule = 1,
+ ClusteredLowOccupancyReschedule = 2,
+ PreRARematerialize = 3,
+ LastStage = PreRARematerialize
+};
+
+#ifndef NDEBUG
+raw_ostream &operator<<(raw_ostream &OS, const GCNSchedStageID &StageID);
+#endif
+
+inline GCNSchedStageID &operator++(GCNSchedStageID &Stage, int) {
+ assert(Stage != GCNSchedStageID::PreRARematerialize);
+ Stage = static_cast<GCNSchedStageID>(static_cast<unsigned>(Stage) + 1);
+ return Stage;
+}
+
+inline GCNSchedStageID nextStage(const GCNSchedStageID Stage) {
+ return static_cast<GCNSchedStageID>(static_cast<unsigned>(Stage) + 1);
+}
- enum : unsigned {
- Collect,
- InitialSchedule,
- UnclusteredReschedule,
- ClusteredLowOccupancyReschedule,
- PreRARematerialize,
- LastStage = PreRARematerialize
- };
+inline bool operator>(GCNSchedStageID &LHS, GCNSchedStageID &RHS) {
+ return static_cast<unsigned>(LHS) > static_cast<unsigned>(RHS);
+}
+
+class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
+ friend class GCNSchedStage;
+ friend class InitialScheduleStage;
+ friend class UnclusteredRescheduleStage;
+ friend class ClusteredLowOccStage;
+ friend class PreRARematStage;
const GCNSubtarget &ST;
@@ -92,12 +118,6 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
// Minimal real occupancy recorder for the function.
unsigned MinOccupancy;
- // Scheduling stage number.
- unsigned Stage;
-
- // Current region index.
- size_t RegionIdx;
-
// Vector of regions recorder for later rescheduling
SmallVector<std::pair<MachineBasicBlock::iterator,
MachineBasicBlock::iterator>, 32> Regions;
@@ -121,6 +141,148 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
// Region pressure cache.
SmallVector<GCNRegPressure, 32> Pressure;
+ // Temporary basic block live-in cache.
+ DenseMap<const MachineBasicBlock *, GCNRPTracker::LiveRegSet> MBBLiveIns;
+
+ DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> BBLiveInMap;
+
+ DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> getBBLiveInMap() const;
+
+ // Return current region pressure.
+ GCNRegPressure getRealRegPressure(unsigned RegionIdx) const;
+
+ // Compute and cache live-ins and pressure for all regions in block.
+ void computeBlockPressure(unsigned RegionIdx, const MachineBasicBlock *MBB);
+
+ // Update region boundaries when removing MI or inserting NewMI before MI.
+ void updateRegionBoundaries(
+ SmallVectorImpl<std::pair<MachineBasicBlock::iterator,
+ MachineBasicBlock::iterator>> &RegionBoundaries,
+ MachineBasicBlock::iterator MI, MachineInstr *NewMI,
+ bool Removing = false);
+
+ void runSchedStages();
+
+public:
+ GCNScheduleDAGMILive(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S);
+
+ void schedule() override;
+
+ void finalizeSchedule() override;
+};
+
+// GCNSchedStrategy applies multiple scheduling stages to a function.
+class GCNSchedStage {
+protected:
+ GCNScheduleDAGMILive &DAG;
+
+ GCNMaxOccupancySchedStrategy &S;
+
+ MachineFunction &MF;
+
+ SIMachineFunctionInfo &MFI;
+
+ const GCNSubtarget &ST;
+
+ const GCNSchedStageID StageID;
+
+ // The current block being scheduled.
+ MachineBasicBlock *CurrentMBB = nullptr;
+
+ // Current region index.
+ unsigned RegionIdx = 0;
+
+ // Record the original order of instructions before scheduling.
+ std::vector<MachineInstr *> Unsched;
+
+ // RP before scheduling the current region.
+ GCNRegPressure PressureBefore;
+
+ // RP after scheduling the current region.
+ GCNRegPressure PressureAfter;
+
+ GCNSchedStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG);
+
+public:
+ // Initialize state for a scheduling stage. Returns false if the current stage
+ // should be skipped.
+ virtual bool initGCNSchedStage();
+
+ // Finalize state after finishing a scheduling pass on the function.
+ virtual void finalizeGCNSchedStage();
+
+ // Setup for scheduling a region. Returns false if the current region should
+ // be skipped.
+ virtual bool initGCNRegion();
+
+ // Track whether a new region is also a new MBB.
+ void setupNewBlock();
+
+ // Finalize state after scheduling a region.
+ virtual void finalizeGCNRegion();
+
+ // Check result of scheduling.
+ void checkScheduling();
+
+ // Returns true if scheduling should be reverted.
+ virtual bool shouldRevertScheduling(unsigned WavesAfter);
+
+ // Returns true if the new schedule may result in more spilling.
+ bool mayCauseSpilling(unsigned WavesAfter);
+
+ // Attempt to revert scheduling for this region.
+ void revertScheduling();
+
+ void advanceRegion() { RegionIdx++; }
+
+ virtual ~GCNSchedStage() = default;
+};
+
+class InitialScheduleStage : public GCNSchedStage {
+public:
+ void finalizeGCNRegion() override;
+
+ bool shouldRevertScheduling(unsigned WavesAfter) override;
+
+ InitialScheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+ : GCNSchedStage(StageID, DAG) {}
+};
+
+class UnclusteredRescheduleStage : public GCNSchedStage {
+private:
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;
+
+public:
+ bool initGCNSchedStage() override;
+
+ void finalizeGCNSchedStage() override;
+
+ bool initGCNRegion() override;
+
+ bool shouldRevertScheduling(unsigned WavesAfter) override;
+
+ UnclusteredRescheduleStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+ : GCNSchedStage(StageID, DAG) {}
+};
+
+// Retry function scheduling if the resulting occupancy is lower than the
+// occupancy used for other scheduling passes. This will give more freedom
+// to schedule low register pressure blocks.
+class ClusteredLowOccStage : public GCNSchedStage {
+public:
+ bool initGCNSchedStage() override;
+
+ bool initGCNRegion() override;
+
+ bool shouldRevertScheduling(unsigned WavesAfter) override;
+
+ ClusteredLowOccStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+ : GCNSchedStage(StageID, DAG) {}
+};
+
+class PreRARematStage : public GCNSchedStage {
+private:
  // Each region at MinOccupancy will have its own list of trivially
// rematerializable instructions we can remat to reduce RP. The list maps an
// instruction to the position we should remat before, usually the MI using
@@ -132,12 +294,6 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
// that has the defined reg as a live-in.
DenseMap<MachineInstr *, SmallVector<unsigned, 4>> RematDefToLiveInRegions;
- // Temporary basic block live-in cache.
- DenseMap<const MachineBasicBlock*, GCNRPTracker::LiveRegSet> MBBLiveIns;
-
- DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> BBLiveInMap;
- DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet> getBBLiveInMap() const;
-
// Collect all trivially rematerializable VGPR instructions with a single def
// and single use outside the defining block into RematerializableInsts.
void collectRematerializableInstructions();
@@ -150,26 +306,15 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
bool sinkTriviallyRematInsts(const GCNSubtarget &ST,
const TargetInstrInfo *TII);
- // Return current region pressure.
- GCNRegPressure getRealRegPressure() const;
-
- // Compute and cache live-ins and pressure for all regions in block.
- void computeBlockPressure(const MachineBasicBlock *MBB);
-
- // Update region boundaries when removing MI or inserting NewMI before MI.
- void updateRegionBoundaries(
- SmallVectorImpl<std::pair<MachineBasicBlock::iterator,
- MachineBasicBlock::iterator>> &RegionBoundaries,
- MachineBasicBlock::iterator MI, MachineInstr *NewMI,
- bool Removing = false);
-
public:
- GCNScheduleDAGMILive(MachineSchedContext *C,
- std::unique_ptr<MachineSchedStrategy> S);
+ bool initGCNSchedStage() override;
- void schedule() override;
+ bool initGCNRegion() override;
- void finalizeSchedule() override;
+ bool shouldRevertScheduling(unsigned WavesAfter) override;
+
+ PreRARematStage(GCNSchedStageID StageID, GCNScheduleDAGMILive &DAG)
+ : GCNSchedStage(StageID, DAG) {}
};
} // End namespace llvm
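
To make the intent of the new GCNSchedStageID enum and nextStage() helper concrete, here is a hedged sketch of a driver loop over the stages. The enum and nextStage() are copied from the header above for self-containment; runStagesSketch() and its body are illustrative assumptions, not the actual runSchedStages() implementation.

// Sketch: iterate the scheduling stages declared by GCNSchedStageID.
enum class GCNSchedStageID : unsigned {
  InitialSchedule = 0,
  UnclusteredReschedule = 1,
  ClusteredLowOccupancyReschedule = 2,
  PreRARematerialize = 3,
  LastStage = PreRARematerialize
};

inline GCNSchedStageID nextStage(GCNSchedStageID Stage) {
  return static_cast<GCNSchedStageID>(static_cast<unsigned>(Stage) + 1);
}

void runStagesSketch() {
  for (GCNSchedStageID S = GCNSchedStageID::InitialSchedule;;
       S = nextStage(S)) {
    // ... create the stage for S, run initGCNSchedStage()/initGCNRegion(),
    // schedule the regions, then finalizeGCNSchedStage() ...
    if (S == GCNSchedStageID::LastStage)
      break;
  }
}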
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
index e093d78b2cc6..d9d7d4efa8c3 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -309,6 +309,11 @@ uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
+static bool isVCMPX64(const MCInstrDesc &Desc) {
+ return (Desc.TSFlags & SIInstrFlags::VOP3) &&
+ Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
+}
+
void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const {
@@ -326,6 +331,17 @@ void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
Encoding |= getImplicitOpSelHiEncoding(Opcode);
}
+ // GFX11 v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
+ // Documentation requires dst to be encoded as EXEC (0x7E),
+ // but it looks like the actual value encoded for the dst operand
+ // is ignored by HW. It was decided to define dst as "do not care"
+ // in td files to allow the disassembler to accept any dst value.
+ // However, dst is encoded as EXEC for compatibility with SP3.
+ if (AMDGPU::isGFX11Plus(STI) && isVCMPX64(Desc)) {
+ assert((Encoding & 0xFF) == 0);
+ Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
+ }
+
for (unsigned i = 0; i < bytes; i++) {
OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
index e7706fa0ef5c..1ed79add64c9 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/R600ISelLowering.h
@@ -54,8 +54,8 @@ public:
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *IsFast = nullptr) const override;
- virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
- bool LegalOperations) const override {
+ bool canCombineTruncStore(EVT ValVT, EVT MemVT,
+ bool LegalOperations) const override {
// R600 has "custom" lowering for truncating stores despite not supporting
// those instructions. If we allow that custom lowering in the DAG combiner
// then all truncates are merged into truncating stores, giving worse code
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 438e8b200ecc..f7d139adc63b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2132,7 +2132,8 @@ void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
SIMachineFunctionInfo &Info,
CallingConv::ID CallConv,
bool IsShader) const {
- if (Subtarget->hasUserSGPRInit16Bug()) {
+ if (Subtarget->hasUserSGPRInit16Bug() && !IsShader) {
+ // Note: user SGPRs are handled by the front-end for graphics shaders
// Pad up the used user SGPRs with dead inputs.
unsigned CurrentUserSGPRs = Info.getNumUserSGPRs();
@@ -2195,7 +2196,8 @@ void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
}
- assert(!Subtarget->hasUserSGPRInit16Bug() || Info.getNumPreloadedSGPRs() >= 16);
+ assert(!Subtarget->hasUserSGPRInit16Bug() || IsShader ||
+ Info.getNumPreloadedSGPRs() >= 16);
}
static void reservePrivateMemoryRegs(const TargetMachine &TM,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
index d1fecc1afc7f..e0101f53880f 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -487,10 +487,10 @@ public:
AtomicExpansionKind
shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
- virtual const TargetRegisterClass *
- getRegClassFor(MVT VT, bool isDivergent) const override;
- virtual bool requiresUniformRegister(MachineFunction &MF,
- const Value *V) const override;
+ const TargetRegisterClass *getRegClassFor(MVT VT,
+ bool isDivergent) const override;
+ bool requiresUniformRegister(MachineFunction &MF,
+ const Value *V) const override;
Align getPrefLoopAlignment(MachineLoop *ML) const override;
void allocateHSAUserSGPRs(CCState &CCInfo,
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index ffe8dce79816..fccb08f86e6d 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -349,7 +349,7 @@ def M0_CLASS_LO16 : SIRegisterClass<"AMDGPU", [i16, f16], 16, (add M0_LO16)> {
def SGPR_LO16 : SIRegisterClass<"AMDGPU", [i16, f16], 16,
(add (sequence "SGPR%u_LO16", 0, 105))> {
- let AllocationPriority = 9;
+ let AllocationPriority = 0;
let Size = 16;
let GeneratePressureSet = 0;
let HasSGPR = 1;
@@ -368,7 +368,7 @@ def SGPR_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
(add (sequence "SGPR%u", 0, 105))> {
// Give all SGPR classes higher priority than VGPR classes, because
// we want to spill SGPRs to VGPRs.
- let AllocationPriority = 9;
+ let AllocationPriority = 0;
let GeneratePressureSet = 0;
let HasSGPR = 1;
}
@@ -528,14 +528,14 @@ def Reg32Types : RegisterTypes<[i32, f32, v2i16, v2f16, p2, p3, p5, p6]>;
let HasVGPR = 1 in {
def VGPR_LO16 : SIRegisterClass<"AMDGPU", Reg16Types.types, 16,
(add (sequence "VGPR%u_LO16", 0, 255))> {
- let AllocationPriority = 1;
+ let AllocationPriority = 0;
let Size = 16;
let GeneratePressureSet = 0;
}
def VGPR_HI16 : SIRegisterClass<"AMDGPU", Reg16Types.types, 16,
(add (sequence "VGPR%u_HI16", 0, 255))> {
- let AllocationPriority = 1;
+ let AllocationPriority = 0;
let Size = 16;
let GeneratePressureSet = 0;
}
@@ -544,7 +544,7 @@ def VGPR_HI16 : SIRegisterClass<"AMDGPU", Reg16Types.types, 16,
// i16/f16 only on VI+
def VGPR_32 : SIRegisterClass<"AMDGPU", !listconcat(Reg32Types.types, Reg16Types.types), 32,
(add (sequence "VGPR%u", 0, 255))> {
- let AllocationPriority = 1;
+ let AllocationPriority = 0;
let Size = 32;
let Weight = 1;
}
@@ -588,7 +588,7 @@ def AGPR_LO16 : SIRegisterClass<"AMDGPU", Reg16Types.types, 16,
// AccVGPR 32-bit registers
def AGPR_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
(add (sequence "AGPR%u", 0, 255))> {
- let AllocationPriority = 1;
+ let AllocationPriority = 0;
let Size = 32;
let Weight = 1;
}
@@ -653,7 +653,7 @@ def SReg_32_XM0_XEXEC : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2
SGPR_NULL, SGPR_NULL_HI, TTMP_32, TMA_LO, TMA_HI, TBA_LO, TBA_HI, SRC_SHARED_BASE,
SRC_SHARED_LIMIT, SRC_PRIVATE_BASE, SRC_PRIVATE_LIMIT, SRC_POPS_EXITING_WAVE_ID,
SRC_VCCZ, SRC_EXECZ, SRC_SCC)> {
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_LO16_XM0_XEXEC : SIRegisterClass<"AMDGPU", [i16, f16], 16,
@@ -663,42 +663,42 @@ def SReg_LO16_XM0_XEXEC : SIRegisterClass<"AMDGPU", [i16, f16], 16,
SRC_SHARED_LIMIT_LO16, SRC_PRIVATE_BASE_LO16, SRC_PRIVATE_LIMIT_LO16,
SRC_POPS_EXITING_WAVE_ID_LO16, SRC_VCCZ_LO16, SRC_EXECZ_LO16, SRC_SCC_LO16)> {
let Size = 16;
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_32_XEXEC_HI : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32,
(add SReg_32_XM0_XEXEC, EXEC_LO, M0_CLASS)> {
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_LO16_XEXEC_HI : SIRegisterClass<"AMDGPU", [i16, f16], 16,
(add SReg_LO16_XM0_XEXEC, EXEC_LO_LO16, M0_CLASS_LO16)> {
let Size = 16;
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_32_XM0 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32,
(add SReg_32_XM0_XEXEC, EXEC_LO, EXEC_HI)> {
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_LO16_XM0 : SIRegisterClass<"AMDGPU", [i16, f16], 16,
(add SReg_LO16_XM0_XEXEC, EXEC_LO_LO16, EXEC_HI_LO16)> {
let Size = 16;
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
def SReg_LO16 : SIRegisterClass<"AMDGPU", [i16, f16], 16,
(add SGPR_LO16, SReg_LO16_XM0, M0_CLASS_LO16, EXEC_LO_LO16, EXEC_HI_LO16, SReg_LO16_XEXEC_HI)> {
let Size = 16;
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
}
} // End GeneratePressureSet = 0
// Register class for all scalar registers (SGPRs + Special Registers)
def SReg_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16, i1], 32,
(add SReg_32_XM0, M0_CLASS, EXEC_LO, EXEC_HI, SReg_32_XEXEC_HI)> {
- let AllocationPriority = 10;
+ let AllocationPriority = 0;
let HasSGPR = 1;
}
@@ -712,7 +712,7 @@ def SRegOrLds_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16],
def SGPR_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, v4i16, v4f16], 32,
(add SGPR_64Regs)> {
let CopyCost = 1;
- let AllocationPriority = 11;
+ let AllocationPriority = 1;
let HasSGPR = 1;
}
@@ -725,14 +725,14 @@ def TTMP_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, f64, v4i16, v4f16], 32,
def SReg_64_XEXEC : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16], 32,
(add SGPR_64, VCC, FLAT_SCR, XNACK_MASK, SGPR_NULL64, TTMP_64, TBA, TMA)> {
let CopyCost = 1;
- let AllocationPriority = 13;
+ let AllocationPriority = 1;
let HasSGPR = 1;
}
def SReg_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, i1, v4i16, v4f16], 32,
(add SReg_64_XEXEC, EXEC)> {
let CopyCost = 1;
- let AllocationPriority = 13;
+ let AllocationPriority = 1;
let HasSGPR = 1;
}
@@ -750,7 +750,7 @@ def SReg_1 : SIRegisterClass<"AMDGPU", [i1], 32,
let HasSGPR = 1;
}
-multiclass SRegClass<int numRegs, int priority,
+multiclass SRegClass<int numRegs,
list<ValueType> regTypes,
SIRegisterTuples regList,
SIRegisterTuples ttmpList = regList,
@@ -760,7 +760,7 @@ multiclass SRegClass<int numRegs, int priority,
defvar sgprName = !strconcat("SGPR_", suffix);
defvar ttmpName = !strconcat("TTMP_", suffix);
- let AllocationPriority = priority, CopyCost = copyCost, HasSGPR = 1 in {
+ let AllocationPriority = !sub(numRegs, 1), CopyCost = copyCost, HasSGPR = 1 in {
def "" # sgprName : SIRegisterClass<"AMDGPU", regTypes, 32, (add regList)> {
}
@@ -781,14 +781,14 @@ multiclass SRegClass<int numRegs, int priority,
}
}
-defm "" : SRegClass<3, 14, [v3i32, v3f32], SGPR_96Regs, TTMP_96Regs>;
-defm "" : SRegClass<4, 15, [v4i32, v4f32, v2i64, v2f64, v8i16, v8f16], SGPR_128Regs, TTMP_128Regs>;
-defm "" : SRegClass<5, 16, [v5i32, v5f32], SGPR_160Regs, TTMP_160Regs>;
-defm "" : SRegClass<6, 17, [v6i32, v6f32, v3i64, v3f64], SGPR_192Regs, TTMP_192Regs>;
-defm "" : SRegClass<7, 18, [v7i32, v7f32], SGPR_224Regs, TTMP_224Regs>;
-defm "" : SRegClass<8, 19, [v8i32, v8f32, v4i64, v4f64, v16i16, v16f16], SGPR_256Regs, TTMP_256Regs>;
-defm "" : SRegClass<16, 20, [v16i32, v16f32, v8i64, v8f64], SGPR_512Regs, TTMP_512Regs>;
-defm "" : SRegClass<32, 21, [v32i32, v32f32, v16i64, v16f64], SGPR_1024Regs>;
+defm "" : SRegClass<3, [v3i32, v3f32], SGPR_96Regs, TTMP_96Regs>;
+defm "" : SRegClass<4, [v4i32, v4f32, v2i64, v2f64, v8i16, v8f16], SGPR_128Regs, TTMP_128Regs>;
+defm "" : SRegClass<5, [v5i32, v5f32], SGPR_160Regs, TTMP_160Regs>;
+defm "" : SRegClass<6, [v6i32, v6f32, v3i64, v3f64], SGPR_192Regs, TTMP_192Regs>;
+defm "" : SRegClass<7, [v7i32, v7f32], SGPR_224Regs, TTMP_224Regs>;
+defm "" : SRegClass<8, [v8i32, v8f32, v4i64, v4f64, v16i16, v16f16], SGPR_256Regs, TTMP_256Regs>;
+defm "" : SRegClass<16, [v16i32, v16f32, v8i64, v8f64], SGPR_512Regs, TTMP_512Regs>;
+defm "" : SRegClass<32, [v32i32, v32f32, v16i64, v16f64], SGPR_1024Regs>;
def VRegOrLds_32 : SIRegisterClass<"AMDGPU", [i32, f32, i16, f16, v2i16, v2f16], 32,
(add VGPR_32, LDS_DIRECT_CLASS)> {
@@ -803,7 +803,7 @@ class VRegClassBase<int numRegs, list<ValueType> regTypes, dag regList> :
// Requires n v_mov_b32 to copy
let CopyCost = numRegs;
- let AllocationPriority = numRegs;
+ let AllocationPriority = !sub(numRegs, 1);
let Weight = numRegs;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index d489a089ac78..5973d32c91d6 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -718,7 +718,7 @@ class VOPC_Class_Profile<list<SchedReadWrite> sched, ValueType vt> :
// DPP8 forbids modifiers and can inherit from VOPC_Profile
let Ins64 = (ins Src0Mod:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
- dag InsPartVOP3DPP = (ins Src0Mod:$src0_modifiers, VGPRSrc_32:$src0, VGPRSrc_32:$src1);
+ dag InsPartVOP3DPP = (ins FPVRegInputMods:$src0_modifiers, VGPRSrc_32:$src0, VGPRSrc_32:$src1);
let InsVOP3Base = !con(InsPartVOP3DPP, !if(HasOpSel, (ins op_sel0:$op_sel),
(ins)));
let Asm64 = "$sdst, $src0_modifiers, $src1";
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 9acd49292268..f81495985405 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -139,6 +139,9 @@ public:
ArrayRef<uint8_t> Bytes, uint64_t Address,
raw_ostream &CStream) const override;
+ uint64_t suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const override;
+
private:
DecodeStatus getARMInstruction(MCInst &Instr, uint64_t &Size,
ArrayRef<uint8_t> Bytes, uint64_t Address,
@@ -739,6 +742,33 @@ static DecodeStatus checkDecodedInstruction(MCInst &MI, uint64_t &Size,
}
}
+uint64_t ARMDisassembler::suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
+ uint64_t Address) const {
+ // In Arm state, instructions are always 4 bytes wide, so there's no
+ // point in skipping any smaller number of bytes if an instruction
+ // can't be decoded.
+ if (!STI.getFeatureBits()[ARM::ModeThumb])
+ return 4;
+
+ // In a Thumb instruction stream, a halfword is a standalone 2-byte
+ // instruction if and only if its value is less than 0xE800.
+ // Otherwise, it's the first halfword of a 4-byte instruction.
+ //
+ // So, if we can see the upcoming halfword, we can judge on that
+ // basis, and maybe skip a whole 4-byte instruction that we don't
+ // know how to decode, without accidentally trying to interpret its
+ // second half as something else.
+ //
+ // If we don't have the instruction data available, we just have to
+ // recommend skipping the minimum sensible distance, which is 2
+ // bytes.
+ if (Bytes.size() < 2)
+ return 2;
+
+ uint16_t Insn16 = (Bytes[1] << 8) | Bytes[0];
+ return Insn16 < 0xE800 ? 2 : 4;
+}
+
DecodeStatus ARMDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
ArrayRef<uint8_t> Bytes,
uint64_t Address,
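
The comment block in suggestBytesToSkip() above encodes a simple rule for Thumb: a leading halfword below 0xE800 is a complete 16-bit instruction, anything at or above it is the first half of a 32-bit encoding. A standalone sketch of that rule (the helper name is made up for illustration):

#include <cstddef>
#include <cstdint>

// Mirrors the Thumb branch of ARMDisassembler::suggestBytesToSkip(): given the
// little-endian bytes of the next halfword, return how many bytes to skip.
static uint64_t thumbBytesToSkip(const uint8_t *Bytes, size_t Size) {
  if (Size < 2)
    return 2; // minimum sensible skip when no instruction data is available
  uint16_t Insn16 = (uint16_t(Bytes[1]) << 8) | Bytes[0];
  return Insn16 < 0xE800 ? 2 : 4; // 16-bit vs. first half of a 32-bit encoding
}
// Example: 0x46C0 (mov r8, r8) -> skip 2; a 0xF000-prefixed halfword -> skip 4.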
diff --git a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.cpp b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
new file mode 100644
index 000000000000..1985bee8e0ae
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.cpp
@@ -0,0 +1,324 @@
+//===- DXILOpBuilder.cpp - Helper class for building DXILOp functions -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file contains a class to help build DXIL op functions.
+//===----------------------------------------------------------------------===//
+
+#include "DXILOpBuilder.h"
+#include "DXILConstants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DXILOperationCommon.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+using namespace llvm::DXIL;
+
+constexpr StringLiteral DXILOpNamePrefix = "dx.op.";
+
+namespace {
+
+enum OverloadKind : uint16_t {
+ VOID = 1,
+ HALF = 1 << 1,
+ FLOAT = 1 << 2,
+ DOUBLE = 1 << 3,
+ I1 = 1 << 4,
+ I8 = 1 << 5,
+ I16 = 1 << 6,
+ I32 = 1 << 7,
+ I64 = 1 << 8,
+ UserDefineType = 1 << 9,
+ ObjectType = 1 << 10,
+};
+
+} // namespace
+
+static const char *getOverloadTypeName(OverloadKind Kind) {
+ switch (Kind) {
+ case OverloadKind::HALF:
+ return "f16";
+ case OverloadKind::FLOAT:
+ return "f32";
+ case OverloadKind::DOUBLE:
+ return "f64";
+ case OverloadKind::I1:
+ return "i1";
+ case OverloadKind::I8:
+ return "i8";
+ case OverloadKind::I16:
+ return "i16";
+ case OverloadKind::I32:
+ return "i32";
+ case OverloadKind::I64:
+ return "i64";
+ case OverloadKind::VOID:
+ case OverloadKind::ObjectType:
+ case OverloadKind::UserDefineType:
+ break;
+ }
+ llvm_unreachable("invalid overload type for name");
+ return "void";
+}
+
+static OverloadKind getOverloadKind(Type *Ty) {
+ Type::TypeID T = Ty->getTypeID();
+ switch (T) {
+ case Type::VoidTyID:
+ return OverloadKind::VOID;
+ case Type::HalfTyID:
+ return OverloadKind::HALF;
+ case Type::FloatTyID:
+ return OverloadKind::FLOAT;
+ case Type::DoubleTyID:
+ return OverloadKind::DOUBLE;
+ case Type::IntegerTyID: {
+ IntegerType *ITy = cast<IntegerType>(Ty);
+ unsigned Bits = ITy->getBitWidth();
+ switch (Bits) {
+ case 1:
+ return OverloadKind::I1;
+ case 8:
+ return OverloadKind::I8;
+ case 16:
+ return OverloadKind::I16;
+ case 32:
+ return OverloadKind::I32;
+ case 64:
+ return OverloadKind::I64;
+ default:
+ llvm_unreachable("invalid overload type");
+ return OverloadKind::VOID;
+ }
+ }
+ case Type::PointerTyID:
+ return OverloadKind::UserDefineType;
+ case Type::StructTyID:
+ return OverloadKind::ObjectType;
+ default:
+ llvm_unreachable("invalid overload type");
+ return OverloadKind::VOID;
+ }
+}
+
+static std::string getTypeName(OverloadKind Kind, Type *Ty) {
+ if (Kind < OverloadKind::UserDefineType) {
+ return getOverloadTypeName(Kind);
+ } else if (Kind == OverloadKind::UserDefineType) {
+ StructType *ST = cast<StructType>(Ty);
+ return ST->getStructName().str();
+ } else if (Kind == OverloadKind::ObjectType) {
+ StructType *ST = cast<StructType>(Ty);
+ return ST->getStructName().str();
+ } else {
+ std::string Str;
+ raw_string_ostream OS(Str);
+ Ty->print(OS);
+ return OS.str();
+ }
+}
+
+// Static properties.
+struct OpCodeProperty {
+ DXIL::OpCode OpCode;
+ // Offset in DXILOpCodeNameTable.
+ unsigned OpCodeNameOffset;
+ DXIL::OpCodeClass OpCodeClass;
+ // Offset in DXILOpCodeClassNameTable.
+ unsigned OpCodeClassNameOffset;
+ uint16_t OverloadTys;
+ llvm::Attribute::AttrKind FuncAttr;
+ int OverloadParamIndex; // Parameter index which controls the overload.
+ // When < 0, there should be only 1 overload type.
+ unsigned NumOfParameters; // Number of parameters, including the return value.
+ unsigned ParameterTableOffset; // Offset in ParameterTable.
+};
+
+// Include getOpCodeClassName, getOpCodeProperty, getOpCodeName and
+// getOpCodeParameterKind, which are generated by TableGen.
+#define DXIL_OP_OPERATION_TABLE
+#include "DXILOperation.inc"
+#undef DXIL_OP_OPERATION_TABLE
+
+static std::string constructOverloadName(OverloadKind Kind, Type *Ty,
+ const OpCodeProperty &Prop) {
+ if (Kind == OverloadKind::VOID) {
+ return (Twine(DXILOpNamePrefix) + getOpCodeClassName(Prop)).str();
+ }
+ return (Twine(DXILOpNamePrefix) + getOpCodeClassName(Prop) + "." +
+ getTypeName(Kind, Ty))
+ .str();
+}
+
+static std::string constructOverloadTypeName(OverloadKind Kind,
+ StringRef TypeName) {
+ if (Kind == OverloadKind::VOID)
+ return TypeName.str();
+
+ assert(Kind < OverloadKind::UserDefineType && "invalid overload kind");
+ return (Twine(TypeName) + getOverloadTypeName(Kind)).str();
+}
+
+static StructType *getOrCreateStructType(StringRef Name,
+ ArrayRef<Type *> EltTys,
+ LLVMContext &Ctx) {
+ StructType *ST = StructType::getTypeByName(Ctx, Name);
+ if (ST)
+ return ST;
+
+ return StructType::create(Ctx, EltTys, Name);
+}
+
+static StructType *getResRetType(Type *OverloadTy, LLVMContext &Ctx) {
+ OverloadKind Kind = getOverloadKind(OverloadTy);
+ std::string TypeName = constructOverloadTypeName(Kind, "dx.types.ResRet.");
+ Type *FieldTypes[5] = {OverloadTy, OverloadTy, OverloadTy, OverloadTy,
+ Type::getInt32Ty(Ctx)};
+ return getOrCreateStructType(TypeName, FieldTypes, Ctx);
+}
+
+static StructType *getHandleType(LLVMContext &Ctx) {
+ return getOrCreateStructType("dx.types.Handle", Type::getInt8PtrTy(Ctx), Ctx);
+}
+
+static Type *getTypeFromParameterKind(ParameterKind Kind, Type *OverloadTy) {
+ auto &Ctx = OverloadTy->getContext();
+ switch (Kind) {
+ case ParameterKind::VOID:
+ return Type::getVoidTy(Ctx);
+ case ParameterKind::HALF:
+ return Type::getHalfTy(Ctx);
+ case ParameterKind::FLOAT:
+ return Type::getFloatTy(Ctx);
+ case ParameterKind::DOUBLE:
+ return Type::getDoubleTy(Ctx);
+ case ParameterKind::I1:
+ return Type::getInt1Ty(Ctx);
+ case ParameterKind::I8:
+ return Type::getInt8Ty(Ctx);
+ case ParameterKind::I16:
+ return Type::getInt16Ty(Ctx);
+ case ParameterKind::I32:
+ return Type::getInt32Ty(Ctx);
+ case ParameterKind::I64:
+ return Type::getInt64Ty(Ctx);
+ case ParameterKind::OVERLOAD:
+ return OverloadTy;
+ case ParameterKind::RESOURCE_RET:
+ return getResRetType(OverloadTy, Ctx);
+ case ParameterKind::DXIL_HANDLE:
+ return getHandleType(Ctx);
+ default:
+ break;
+ }
+ llvm_unreachable("Invalid parameter kind");
+ return nullptr;
+}
+
+static FunctionType *getDXILOpFunctionType(const OpCodeProperty *Prop,
+ Type *OverloadTy) {
+ SmallVector<Type *> ArgTys;
+
+ auto ParamKinds = getOpCodeParameterKind(*Prop);
+
+ for (unsigned I = 0; I < Prop->NumOfParameters; ++I) {
+ ParameterKind Kind = ParamKinds[I];
+ ArgTys.emplace_back(getTypeFromParameterKind(Kind, OverloadTy));
+ }
+ return FunctionType::get(
+ ArgTys[0], ArrayRef<Type *>(&ArgTys[1], ArgTys.size() - 1), false);
+}
+
+static FunctionCallee getOrCreateDXILOpFunction(DXIL::OpCode DXILOp,
+ Type *OverloadTy, Module &M) {
+ const OpCodeProperty *Prop = getOpCodeProperty(DXILOp);
+
+ OverloadKind Kind = getOverloadKind(OverloadTy);
+ // FIXME: find the issue and report an error in clang instead of checking it
+ // in the backend.
+ if ((Prop->OverloadTys & (uint16_t)Kind) == 0) {
+ llvm_unreachable("invalid overload");
+ }
+
+ std::string FnName = constructOverloadName(Kind, OverloadTy, *Prop);
+ // Rely on the constructed name to dedup.
+ if (auto *Fn = M.getFunction(FnName))
+ return FunctionCallee(Fn);
+
+ FunctionType *DXILOpFT = getDXILOpFunctionType(Prop, OverloadTy);
+ return M.getOrInsertFunction(FnName, DXILOpFT);
+}
+
+namespace llvm {
+namespace DXIL {
+
+CallInst *DXILOpBuilder::createDXILOpCall(DXIL::OpCode OpCode, Type *OverloadTy,
+ llvm::iterator_range<Use *> Args) {
+ auto Fn = getOrCreateDXILOpFunction(OpCode, OverloadTy, M);
+ SmallVector<Value *> FullArgs;
+ FullArgs.emplace_back(B.getInt32((int32_t)OpCode));
+ FullArgs.append(Args.begin(), Args.end());
+ return B.CreateCall(Fn, FullArgs);
+}
+
+Type *DXILOpBuilder::getOverloadTy(DXIL::OpCode OpCode, FunctionType *FT,
+ bool NoOpCodeParam) {
+
+ const OpCodeProperty *Prop = getOpCodeProperty(OpCode);
+ if (Prop->OverloadParamIndex < 0) {
+ auto &Ctx = FT->getContext();
+ // When there is only 1 overload type, just return it.
+ switch (Prop->OverloadTys) {
+ case OverloadKind::VOID:
+ return Type::getVoidTy(Ctx);
+ case OverloadKind::HALF:
+ return Type::getHalfTy(Ctx);
+ case OverloadKind::FLOAT:
+ return Type::getFloatTy(Ctx);
+ case OverloadKind::DOUBLE:
+ return Type::getDoubleTy(Ctx);
+ case OverloadKind::I1:
+ return Type::getInt1Ty(Ctx);
+ case OverloadKind::I8:
+ return Type::getInt8Ty(Ctx);
+ case OverloadKind::I16:
+ return Type::getInt16Ty(Ctx);
+ case OverloadKind::I32:
+ return Type::getInt32Ty(Ctx);
+ case OverloadKind::I64:
+ return Type::getInt64Ty(Ctx);
+ default:
+ llvm_unreachable("invalid overload type");
+ return nullptr;
+ }
+ }
+
+ // When Prop->OverloadParamIndex is 0, the overload type is FT->getReturnType().
+ Type *OverloadType = FT->getReturnType();
+ if (Prop->OverloadParamIndex != 0) {
+ // Skip Return Type and Type for DXIL opcode.
+ const unsigned SkipedParam = NoOpCodeParam ? 2 : 1;
+ OverloadType = FT->getParamType(Prop->OverloadParamIndex - SkipedParam);
+ }
+
+ auto ParamKinds = getOpCodeParameterKind(*Prop);
+ auto Kind = ParamKinds[Prop->OverloadParamIndex];
+ // For ResRet and CBufferRet, OverloadTy is in field of StructType.
+ if (Kind == ParameterKind::CBUFFER_RET ||
+ Kind == ParameterKind::RESOURCE_RET) {
+ auto *ST = cast<StructType>(OverloadType);
+ OverloadType = ST->getElementType(0);
+ }
+ return OverloadType;
+}
+
+const char *DXILOpBuilder::getOpCodeName(DXIL::OpCode DXILOp) {
+ return ::getOpCodeName(DXILOp);
+}
+} // namespace DXIL
+} // namespace llvm
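
A brief, hedged usage sketch of the new builder, condensed from the DXILOpLowering.cpp hunk later in this diff: construct it over a Module and an IRBuilder, resolve the overload type from the intrinsic's function type, and emit the DXIL op call. lowerOneCall() and its parameters are illustrative; the surrounding setup is assumed.

#include "DXILOpBuilder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

// F is the intrinsic being lowered and CI one of its call sites.
void lowerOneCall(llvm::Module &M, llvm::Function &F, llvm::CallInst *CI,
                  llvm::DXIL::OpCode Op) {
  llvm::IRBuilder<> B(M.getContext());
  llvm::DXIL::DXILOpBuilder DXILB(M, B);
  llvm::Type *OverloadTy =
      DXILB.getOverloadTy(Op, F.getFunctionType(), /*NoOpCodeParam=*/true);
  B.SetInsertPoint(CI);
  // createDXILOpCall prepends the i32 opcode argument itself.
  llvm::CallInst *DXILCI = DXILB.createDXILOpCall(Op, OverloadTy, CI->args());
  CI->replaceAllUsesWith(DXILCI);
  CI->eraseFromParent();
}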
diff --git a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.h b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.h
new file mode 100644
index 000000000000..0cc39e845b71
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpBuilder.h
@@ -0,0 +1,46 @@
+//===- DXILOpBuilder.h - Helper class for building DXILOp functions -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file contains a class to help build DXIL op functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_DIRECTX_DXILOPBUILDER_H
+#define LLVM_LIB_TARGET_DIRECTX_DXILOPBUILDER_H
+
+#include "DXILConstants.h"
+#include "llvm/ADT/iterator_range.h"
+
+namespace llvm {
+class Module;
+class IRBuilderBase;
+class CallInst;
+class Value;
+class Type;
+class FunctionType;
+class Use;
+
+namespace DXIL {
+
+class DXILOpBuilder {
+public:
+ DXILOpBuilder(Module &M, IRBuilderBase &B) : M(M), B(B) {}
+ CallInst *createDXILOpCall(DXIL::OpCode OpCode, Type *OverloadTy,
+ llvm::iterator_range<Use *> Args);
+ Type *getOverloadTy(DXIL::OpCode OpCode, FunctionType *FT,
+ bool NoOpCodeParam);
+ static const char *getOpCodeName(DXIL::OpCode DXILOp);
+
+private:
+ Module &M;
+ IRBuilderBase &B;
+};
+
+} // namespace DXIL
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index 11b89e4ec890..20c08f47745d 100644
--- a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "DXILConstants.h"
+#include "DXILOpBuilder.h"
#include "DirectX.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
@@ -28,168 +29,12 @@
using namespace llvm;
using namespace llvm::DXIL;
-constexpr StringLiteral DXILOpNamePrefix = "dx.op.";
-
-enum OverloadKind : uint16_t {
- VOID = 1,
- HALF = 1 << 1,
- FLOAT = 1 << 2,
- DOUBLE = 1 << 3,
- I1 = 1 << 4,
- I8 = 1 << 5,
- I16 = 1 << 6,
- I32 = 1 << 7,
- I64 = 1 << 8,
- UserDefineType = 1 << 9,
- ObjectType = 1 << 10,
-};
-
-static const char *getOverloadTypeName(OverloadKind Kind) {
- switch (Kind) {
- case OverloadKind::HALF:
- return "f16";
- case OverloadKind::FLOAT:
- return "f32";
- case OverloadKind::DOUBLE:
- return "f64";
- case OverloadKind::I1:
- return "i1";
- case OverloadKind::I8:
- return "i8";
- case OverloadKind::I16:
- return "i16";
- case OverloadKind::I32:
- return "i32";
- case OverloadKind::I64:
- return "i64";
- case OverloadKind::VOID:
- case OverloadKind::ObjectType:
- case OverloadKind::UserDefineType:
- break;
- }
- llvm_unreachable("invalid overload type for name");
- return "void";
-}
-
-static OverloadKind getOverloadKind(Type *Ty) {
- Type::TypeID T = Ty->getTypeID();
- switch (T) {
- case Type::VoidTyID:
- return OverloadKind::VOID;
- case Type::HalfTyID:
- return OverloadKind::HALF;
- case Type::FloatTyID:
- return OverloadKind::FLOAT;
- case Type::DoubleTyID:
- return OverloadKind::DOUBLE;
- case Type::IntegerTyID: {
- IntegerType *ITy = cast<IntegerType>(Ty);
- unsigned Bits = ITy->getBitWidth();
- switch (Bits) {
- case 1:
- return OverloadKind::I1;
- case 8:
- return OverloadKind::I8;
- case 16:
- return OverloadKind::I16;
- case 32:
- return OverloadKind::I32;
- case 64:
- return OverloadKind::I64;
- default:
- llvm_unreachable("invalid overload type");
- return OverloadKind::VOID;
- }
- }
- case Type::PointerTyID:
- return OverloadKind::UserDefineType;
- case Type::StructTyID:
- return OverloadKind::ObjectType;
- default:
- llvm_unreachable("invalid overload type");
- return OverloadKind::VOID;
- }
-}
-
-static std::string getTypeName(OverloadKind Kind, Type *Ty) {
- if (Kind < OverloadKind::UserDefineType) {
- return getOverloadTypeName(Kind);
- } else if (Kind == OverloadKind::UserDefineType) {
- StructType *ST = cast<StructType>(Ty);
- return ST->getStructName().str();
- } else if (Kind == OverloadKind::ObjectType) {
- StructType *ST = cast<StructType>(Ty);
- return ST->getStructName().str();
- } else {
- std::string Str;
- raw_string_ostream OS(Str);
- Ty->print(OS);
- return OS.str();
- }
-}
-
-// Static properties.
-struct OpCodeProperty {
- DXIL::OpCode OpCode;
- // Offset in DXILOpCodeNameTable.
- unsigned OpCodeNameOffset;
- DXIL::OpCodeClass OpCodeClass;
- // Offset in DXILOpCodeClassNameTable.
- unsigned OpCodeClassNameOffset;
- uint16_t OverloadTys;
- llvm::Attribute::AttrKind FuncAttr;
-};
-
-// Include getOpCodeClassName getOpCodeProperty and getOpCodeName which
-// generated by tableGen.
-#define DXIL_OP_OPERATION_TABLE
-#include "DXILOperation.inc"
-#undef DXIL_OP_OPERATION_TABLE
-
-static std::string constructOverloadName(OverloadKind Kind, Type *Ty,
- const OpCodeProperty &Prop) {
- if (Kind == OverloadKind::VOID) {
- return (Twine(DXILOpNamePrefix) + getOpCodeClassName(Prop)).str();
- }
- return (Twine(DXILOpNamePrefix) + getOpCodeClassName(Prop) + "." +
- getTypeName(Kind, Ty))
- .str();
-}
-
-static FunctionCallee createDXILOpFunction(DXIL::OpCode DXILOp, Function &F,
- Module &M) {
- const OpCodeProperty *Prop = getOpCodeProperty(DXILOp);
-
- // Get return type as overload type for DXILOp.
- // Only simple mapping case here, so return type is good enough.
- Type *OverloadTy = F.getReturnType();
-
- OverloadKind Kind = getOverloadKind(OverloadTy);
- // FIXME: find the issue and report error in clang instead of check it in
- // backend.
- if ((Prop->OverloadTys & (uint16_t)Kind) == 0) {
- llvm_unreachable("invalid overload");
- }
-
- std::string FnName = constructOverloadName(Kind, OverloadTy, *Prop);
- assert(!M.getFunction(FnName) && "Function already exists");
-
- auto &Ctx = M.getContext();
- Type *OpCodeTy = Type::getInt32Ty(Ctx);
-
- SmallVector<Type *> ArgTypes;
- // DXIL has i32 opcode as first arg.
- ArgTypes.emplace_back(OpCodeTy);
- FunctionType *FT = F.getFunctionType();
- ArgTypes.append(FT->param_begin(), FT->param_end());
- FunctionType *DXILOpFT = FunctionType::get(OverloadTy, ArgTypes, false);
- return M.getOrInsertFunction(FnName, DXILOpFT);
-}
-
static void lowerIntrinsic(DXIL::OpCode DXILOp, Function &F, Module &M) {
- auto DXILOpFn = createDXILOpFunction(DXILOp, F, M);
IRBuilder<> B(M.getContext());
Value *DXILOpArg = B.getInt32(static_cast<unsigned>(DXILOp));
+ DXILOpBuilder DXILB(M, B);
+ Type *OverloadTy =
+ DXILB.getOverloadTy(DXILOp, F.getFunctionType(), /*NoOpCodeParam*/ true);
for (User *U : make_early_inc_range(F.users())) {
CallInst *CI = dyn_cast<CallInst>(U);
if (!CI)
@@ -199,8 +44,8 @@ static void lowerIntrinsic(DXIL::OpCode DXILOp, Function &F, Module &M) {
Args.emplace_back(DXILOpArg);
Args.append(CI->arg_begin(), CI->arg_end());
B.SetInsertPoint(CI);
- CallInst *DXILCI = B.CreateCall(DXILOpFn, Args);
- LLVM_DEBUG(DXILCI->setName(getOpCodeName(DXILOp)));
+ CallInst *DXILCI = DXILB.createDXILOpCall(DXILOp, OverloadTy, CI->args());
+
CI->replaceAllUsesWith(DXILCI);
CI->eraseFromParent();
}
diff --git a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp
index e2a41515de38..a873662f730d 100644
--- a/contrib/llvm-project/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/DirectX/DXILWriter/DXILValueEnumerator.cpp
@@ -260,9 +260,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
return LU->getOperandNo() > RU->getOperandNo();
});
- if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
- return L.second < R.second;
- }))
+ if (llvm::is_sorted(List, llvm::less_second()))
// Order is already correct.
return;
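
The replacement above swaps a hand-written lambda for llvm::less_second, which orders elements by the .second member of a pair-like type. A minimal equivalence sketch using a local functor so it stays self-contained in standard C++ (LessSecond is a stand-in for the LLVM helper):

#include <algorithm>
#include <utility>
#include <vector>

// LessSecond mimics what llvm::less_second provides: compare by .second.
struct LessSecond {
  template <typename T> bool operator()(const T &L, const T &R) const {
    return L.second < R.second;
  }
};

bool isSortedBySecond(const std::vector<std::pair<unsigned, unsigned>> &List) {
  return std::is_sorted(List.begin(), List.end(), LessSecond());
}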
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
index 1e50385a7b4b..505c90f66f43 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h
@@ -95,7 +95,6 @@ public:
void SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl);
void SelectStore(SDNode *N);
void SelectSHL(SDNode *N);
- void SelectZeroExtend(SDNode *N);
void SelectIntrinsicWChain(SDNode *N);
void SelectIntrinsicWOChain(SDNode *N);
void SelectConstant(SDNode *N);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 9561dfe8a35d..1dc6a4cb9c89 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -107,9 +107,6 @@ class HexagonTargetLowering : public TargetLowering {
const HexagonTargetMachine &HTM;
const HexagonSubtarget &Subtarget;
- bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
- const;
-
public:
explicit HexagonTargetLowering(const TargetMachine &TM,
const HexagonSubtarget &ST);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index c8e6276aa4de..b8671f26d124 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2253,15 +2253,6 @@ bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
}
-bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
- if (MI.mayLoadOrStore() || MI.isCompare())
- return true;
-
- // Multiply
- unsigned SchedClass = MI.getDesc().getSchedClass();
- return is_TC4x(SchedClass) || is_TC3x(SchedClass);
-}
-
bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
return (Opcode == Hexagon::ENDLOOP0 ||
Opcode == Hexagon::ENDLOOP1);
@@ -2417,43 +2408,6 @@ bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr &MI,
}
}
-bool HexagonInstrInfo::isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI,
- const MachineInstr &ESMI) const {
- bool isLate = isLateResultInstr(LRMI);
- bool isEarly = isEarlySourceInstr(ESMI);
-
- LLVM_DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
- LLVM_DEBUG(LRMI.dump());
- LLVM_DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
- LLVM_DEBUG(ESMI.dump());
-
- if (isLate && isEarly) {
- LLVM_DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
- return true;
- }
-
- return false;
-}
-
-bool HexagonInstrInfo::isLateResultInstr(const MachineInstr &MI) const {
- switch (MI.getOpcode()) {
- case TargetOpcode::EXTRACT_SUBREG:
- case TargetOpcode::INSERT_SUBREG:
- case TargetOpcode::SUBREG_TO_REG:
- case TargetOpcode::REG_SEQUENCE:
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::COPY:
- case TargetOpcode::INLINEASM:
- case TargetOpcode::PHI:
- return false;
- default:
- break;
- }
-
- unsigned SchedClass = MI.getDesc().getSchedClass();
- return !is_TC1(SchedClass);
-}
-
bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
  // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE use a multiply
// resource, but all operands can be received late like an ALU instruction.
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 2af09c857d86..703a894132bb 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -363,7 +363,6 @@ public:
bool isDotCurInst(const MachineInstr &MI) const;
bool isDotNewInst(const MachineInstr &MI) const;
bool isDuplexPair(const MachineInstr &MIa, const MachineInstr &MIb) const;
- bool isEarlySourceInstr(const MachineInstr &MI) const;
bool isEndLoopN(unsigned Opcode) const;
bool isExpr(unsigned OpType) const;
bool isExtendable(const MachineInstr &MI) const;
@@ -375,9 +374,6 @@ public:
bool isIndirectL4Return(const MachineInstr &MI) const;
bool isJumpR(const MachineInstr &MI) const;
bool isJumpWithinBranchRange(const MachineInstr &MI, unsigned offset) const;
- bool isLateInstrFeedsEarlyInstr(const MachineInstr &LRMI,
- const MachineInstr &ESMI) const;
- bool isLateResultInstr(const MachineInstr &MI) const;
bool isLateSourceInstr(const MachineInstr &MI) const;
bool isLoopN(const MachineInstr &MI) const;
bool isMemOp(const MachineInstr &MI) const;
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
index d11f5a9080a0..9793c7bc3532 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/AsmParser/LoongArchAsmParser.cpp
@@ -248,7 +248,7 @@ public:
addExpr(Inst, getImm());
}
};
-} // end anonymous namespace
+} // end namespace
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp
index 215d061f11f2..beb757c78596 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/Disassembler/LoongArchDisassembler.cpp
@@ -39,7 +39,7 @@ public:
ArrayRef<uint8_t> Bytes, uint64_t Address,
raw_ostream &CStream) const override;
};
-} // end anonymous namespace
+} // end namespace
static MCDisassembler *createLoongArchDisassembler(const Target &T,
const MCSubtargetInfo &STI,
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArch.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArch.h
index caa7bd31e28b..e6c9c24dd1b2 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArch.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArch.h
@@ -33,6 +33,6 @@ bool lowerLoongArchMachineOperandToMCOperand(const MachineOperand &MO,
const AsmPrinter &AP);
FunctionPass *createLoongArchISelDag(LoongArchTargetMachine &TM);
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_LOONGARCH_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
index 014b666de711..72d8e006a0bb 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchFrameLowering.h
@@ -52,5 +52,5 @@ private:
const DebugLoc &DL, Register DestReg, Register SrcReg,
int64_t Val, MachineInstr::MIFlag Flag) const;
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_LOONGARCHFRAMELOWERING_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
index 7ad329a64424..8c9357d75979 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -55,6 +55,6 @@ public:
#include "LoongArchGenDAGISel.inc"
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_LOONGARCHISELDAGTODAG_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 279550482675..141f1fd3a55d 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -45,7 +45,7 @@ enum NodeType : unsigned {
BSTRPICK,
};
-} // namespace LoongArchISD
+} // end namespace LoongArchISD
class LoongArchTargetLowering : public TargetLowering {
const LoongArchSubtarget &Subtarget;
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.h
index 02c9156e2b87..cca130c3bc3a 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.h
@@ -45,6 +45,6 @@ struct LoongArchRegisterInfo : public LoongArchGenRegisterInfo {
Register getFrameRegister(const MachineFunction &MF) const override;
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_LOONGARCHREGISTERINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchSubtarget.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
index 95c2c676cc3c..fbe7a176b371 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
@@ -84,6 +84,6 @@ public:
unsigned getGRLen() const { return GRLen; }
LoongArchABI::ABI getTargetABI() const { return TargetABI; }
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_LOONGARCHSUBTARGET_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index 2d08d5c674bc..7ba5848e0997 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -103,7 +103,7 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
};
-} // namespace
+} // end namespace
TargetPassConfig *
LoongArchTargetMachine::createPassConfig(PassManagerBase &PM) {
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index 77bbfb095747..a5f0b816c972 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -58,6 +58,6 @@ public:
std::unique_ptr<MCObjectTargetWriter>
createObjectTargetWriter() const override;
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_MCTARGETDESC_LOONGARCHASMBACKEND_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp
index f0c985883125..de2ba2833414 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.cpp
@@ -35,6 +35,6 @@ ABI getTargetABI(StringRef ABIName) {
// FIXME: other register?
MCRegister getBPReg() { return LoongArch::R31; }
-} // namespace LoongArchABI
+} // end namespace LoongArchABI
-} // namespace llvm
+} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h
index e26f22de0cbc..fee247a0c02c 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchBaseInfo.h
@@ -37,8 +37,8 @@ ABI getTargetABI(StringRef ABIName);
// Returns the register used to hold the stack pointer after realignment.
MCRegister getBPReg();
-} // namespace LoongArchABI
+} // end namespace LoongArchABI
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_MCTARGETDESC_LOONGARCHBASEINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp
index 95e1314f363a..1850b0d8a756 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchELFObjectWriter.cpp
@@ -33,7 +33,7 @@ protected:
unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup, bool IsPCRel) const override;
};
-} // namespace
+} // end namespace
LoongArchELFObjectWriter::LoongArchELFObjectWriter(uint8_t OSABI, bool Is64Bit)
: MCELFObjectTargetWriter(Is64Bit, OSABI, ELF::EM_LOONGARCH,
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h
index 727fc6a3e1f3..0cbb3d73cd03 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchInstPrinter.h
@@ -44,6 +44,6 @@ private:
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_MCTARGETDESC_LOONGARCHINSTPRINTER_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h
index 1cf8a2fdf8aa..ed1abbf46153 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCAsmInfo.h
@@ -25,6 +25,6 @@ public:
explicit LoongArchMCAsmInfo(const Triple &TargetTriple);
};
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_MCTARGETDESC_LOONGARCHMCASMINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
index 9c6a4f39b9ea..01a370a90403 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCCodeEmitter.cpp
@@ -69,7 +69,7 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
};
-} // end anonymous namespace
+} // end namespace
unsigned
LoongArchMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp
index e50761ab1e27..8d71235f6a81 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.cpp
@@ -95,7 +95,7 @@ public:
}
};
-} // end anonymous namespace
+} // end namespace
static MCInstrAnalysis *createLoongArchInstrAnalysis(const MCInstrInfo *Info) {
return new LoongArchMCInstrAnalysis(Info);
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h
index a606ccdbc47c..ab35a0096c8a 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMCTargetDesc.h
@@ -38,7 +38,7 @@ MCAsmBackend *createLoongArchAsmBackend(const Target &T,
std::unique_ptr<MCObjectTargetWriter>
createLoongArchELFObjectWriter(uint8_t OSABI, bool Is64Bit);
-} // namespace llvm
+} // end namespace llvm
// Defines symbolic names for LoongArch registers.
#define GET_REGINFO_ENUM
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h
index 945aa91e40c0..be1b425894de 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchMatInt.h
@@ -24,7 +24,7 @@ using InstSeq = SmallVector<Inst, 4>;
// Helper to generate an instruction sequence that will materialise the given
// immediate value into a register.
InstSeq generateInstSeq(int64_t Val);
-} // namespace LoongArchMatInt
-} // namespace llvm
+} // end namespace LoongArchMatInt
+} // end namespace llvm
#endif
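
As an aside on the materialisation helper mentioned in the context above (generateInstSeq), a 32-bit immediate on LoongArch is typically split into a 20-bit upper part for lu12i.w and a 12-bit lower part for ori. The sketch below is only a minimal illustration of that split in plain C++ under that assumed instruction pairing; the names (splitImm32, HiLo) are invented for the sketch and it is not the library routine itself, which also handles 64-bit values and more instruction forms.

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit immediate into the (hi20, lo12) pair a lu12i.w + ori
    // sequence would use. lo12 is OR'ed in, so no rounding of hi20 is needed.
    struct HiLo {
      uint32_t Hi20; // bits 31..12
      uint32_t Lo12; // bits 11..0
    };

    static HiLo splitImm32(uint32_t Val) {
      return {Val >> 12, Val & 0xfff};
    }

    int main() {
      uint32_t Val = 0x12345678;
      HiLo Parts = splitImm32(Val);
      // Recombine the way "load upper 20 bits" followed by an OR would.
      uint32_t Rebuilt = (Parts.Hi20 << 12) | Parts.Lo12;
      assert(Rebuilt == Val);
      return 0;
    }
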
diff --git a/contrib/llvm-project/llvm/lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h b/contrib/llvm-project/llvm/lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h
index 6fc13d52c065..b24cf879512c 100644
--- a/contrib/llvm-project/llvm/lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/LoongArch/TargetInfo/LoongArchTargetInfo.h
@@ -16,6 +16,6 @@ class Target;
Target &getTheLoongArch32Target();
Target &getTheLoongArch64Target();
-} // namespace llvm
+} // end namespace llvm
#endif // LLVM_LIB_TARGET_LOONGARCH_TARGETINFO_LOONGARCHTARGETINFO_H
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index cb6d53ec0a12..5dc2bf07ddd5 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -31,8 +31,8 @@ public:
: CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
/*LegalizerInfo*/ nullptr, /*EnableOpt*/ false,
/*EnableOptSize*/ false, /*EnableMinSize*/ false) {}
- virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
- MachineIRBuilder &B) const override;
+ bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
+ MachineIRBuilder &B) const override;
};
bool MipsPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
index b700a9ede39b..a19253da440e 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVCodeGenPrepare.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
@@ -81,6 +82,20 @@ bool RISCVCodeGenPrepare::optimizeZExt(ZExtInst *ZExt) {
return true;
}
+ // Convert (zext (abs(i32 X, i1 1))) -> (sext (abs(i32 X, i1 1))). If abs of
+ // INT_MIN is poison, the sign bit is zero.
+ using namespace PatternMatch;
+ if (match(Src, m_Intrinsic<Intrinsic::abs>(m_Value(), m_One()))) {
+ auto *SExt = new SExtInst(Src, ZExt->getType(), "", ZExt);
+ SExt->takeName(ZExt);
+ SExt->setDebugLoc(ZExt->getDebugLoc());
+
+ ZExt->replaceAllUsesWith(SExt);
+ ZExt->eraseFromParent();
+ ++NumZExtToSExt;
+ return true;
+ }
+
return false;
}
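
The transform added above relies on a small bit-level fact: when abs is told that abs(INT_MIN) is poison (the i1 1 flag), its result can be treated as non-negative, and zero-extending a non-negative 32-bit value gives the same 64-bit pattern as sign-extending it. Below is a standalone check of that equivalence in plain C++, independent of the LLVM APIs used in the patch.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      // For any non-negative 32-bit value (which is what abs yields once the
      // INT_MIN case is excluded as poison), zext and sext agree bit-for-bit.
      int32_t Samples[] = {0, 1, 42, 0x7fffffff, std::abs(-7)};
      for (int32_t V : Samples) {
        uint64_t ZExt = static_cast<uint32_t>(V); // zero-extend to 64 bits
        int64_t SExt = static_cast<int64_t>(V);   // sign-extend to 64 bits
        assert(ZExt == static_cast<uint64_t>(SExt));
      }
      return 0;
    }
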
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1702546b58a6..baa19e81e436 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1313,6 +1313,25 @@ bool RISCVTargetLowering::shouldSinkOperands(
return true;
}
+bool RISCVTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
+ unsigned Opc = VecOp.getOpcode();
+
+ // Assume target opcodes can't be scalarized.
+ // TODO - do we have any exceptions?
+ if (Opc >= ISD::BUILTIN_OP_END)
+ return false;
+
+ // If the vector op is not supported, try to convert to scalar.
+ EVT VecVT = VecOp.getValueType();
+ if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
+ return true;
+
+ // If the vector op is supported, but the scalar op is not, the transform may
+ // not be worthwhile.
+ EVT ScalarVT = VecVT.getScalarType();
+ return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
+}
+
bool RISCVTargetLowering::isOffsetFoldingLegal(
const GlobalAddressSDNode *GA) const {
// In order to maximise the opportunity for common subexpression elimination,
@@ -1387,18 +1406,28 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
}
}
- // Convert X > -1 to X >= 0.
- if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
- RHS = DAG.getConstant(0, DL, RHS.getValueType());
- CC = ISD::SETGE;
- return;
- }
- // Convert X < 1 to 0 >= X.
- if (CC == ISD::SETLT && isOneConstant(RHS)) {
- RHS = LHS;
- LHS = DAG.getConstant(0, DL, RHS.getValueType());
- CC = ISD::SETGE;
- return;
+ if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
+ int64_t C = RHSC->getSExtValue();
+ switch (CC) {
+ default: break;
+ case ISD::SETGT:
+ // Convert X > -1 to X >= 0.
+ if (C == -1) {
+ RHS = DAG.getConstant(0, DL, RHS.getValueType());
+ CC = ISD::SETGE;
+ return;
+ }
+ break;
+ case ISD::SETLT:
+ // Convert X < 1 to 0 >= X.

+ if (C == 1) {
+ RHS = LHS;
+ LHS = DAG.getConstant(0, DL, RHS.getValueType());
+ CC = ISD::SETGE;
+ return;
+ }
+ break;
+ }
}
switch (CC) {
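
The constant-compare rewrites in the hunk above are simple signed-integer identities: X > -1 is the same predicate as X >= 0, and X < 1 is the same as 0 >= X (the operands are swapped so the emitted compare is still a GE). A quick brute-force check of both identities, as a plain C++ sketch rather than SelectionDAG code:

    #include <cassert>

    int main() {
      // The two rewrites used by translateSetCCForBranch on signed ints:
      //   X > -1  <=>  X >= 0
      //   X < 1   <=>  0 >= X   (operands swapped, condition becomes GE)
      for (int X = -1000; X <= 1000; ++X) {
        assert((X > -1) == (X >= 0));
        assert((X < 1) == (0 >= X));
      }
      return 0;
    }
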
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 5e15176de59c..6ecf8b8324d4 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -376,6 +376,7 @@ public:
SelectionDAG &DAG) const override;
bool shouldSinkOperands(Instruction *I,
SmallVectorImpl<Use *> &Ops) const override;
+ bool shouldScalarizeBinop(SDValue VecOp) const override;
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 4aa9ded5b3a2..beb49f5f6249 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -134,14 +134,13 @@ public:
getSerializableDirectMachineOperandTargetFlags() const override;
// Return true if the function can safely be outlined from.
- virtual bool
- isFunctionSafeToOutlineFrom(MachineFunction &MF,
- bool OutlineFromLinkOnceODRs) const override;
+ bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
+ bool OutlineFromLinkOnceODRs) const override;
// Return true if MBB is safe to outline from, and return any target-specific
// information in Flags.
- virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
- unsigned &Flags) const override;
+ bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
+ unsigned &Flags) const override;
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
@@ -150,17 +149,15 @@ public:
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
// Return if/how a given MachineInstr should be outlined.
- virtual outliner::InstrType
- getOutliningType(MachineBasicBlock::iterator &MBBI,
- unsigned Flags) const override;
+ outliner::InstrType getOutliningType(MachineBasicBlock::iterator &MBBI,
+ unsigned Flags) const override;
// Insert a custom frame for outlined functions.
- virtual void
- buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
- const outliner::OutlinedFunction &OF) const override;
+ void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
+ const outliner::OutlinedFunction &OF) const override;
// Insert a call to an outlined function into a given basic block.
- virtual MachineBasicBlock::iterator
+ MachineBasicBlock::iterator
insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &It, MachineFunction &MF,
outliner::Candidate &C) const override;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index d204c85d6179..cd1da4360002 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -696,52 +696,36 @@ def C_SRAI64_HINT : RVInst16CI<0b100, 0b01, (outs GPRC:$rd_wb),
//===----------------------------------------------------------------------===//
let EmitPriority = 0 in {
-let Predicates = [HasStdExtC, HasStdExtD] in
-def : InstAlias<"c.fld $rd, (${rs1})", (C_FLD FPR64C:$rd, GPRC:$rs1, 0)>;
-
+let Predicates = [HasStdExtC] in {
def : InstAlias<"c.lw $rd, (${rs1})", (C_LW GPRC:$rd, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
-def : InstAlias<"c.flw $rd, (${rs1})", (C_FLW FPR32C:$rd, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, IsRV64] in
-def : InstAlias<"c.ld $rd, (${rs1})", (C_LD GPRC:$rd, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, HasStdExtD] in
-def : InstAlias<"c.fsd $rs2, (${rs1})", (C_FSD FPR64C:$rs2, GPRC:$rs1, 0)>;
-
def : InstAlias<"c.sw $rs2, (${rs1})", (C_SW GPRC:$rs2, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
-def : InstAlias<"c.fsw $rs2, (${rs1})", (C_FSW FPR32C:$rs2, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, IsRV64] in
-def : InstAlias<"c.sd $rs2, (${rs1})", (C_SD GPRC:$rs2, GPRC:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, HasStdExtD] in
-def : InstAlias<"c.fldsp $rd, (${rs1})", (C_FLDSP FPR64C:$rd, SP:$rs1, 0)>;
-
def : InstAlias<"c.lwsp $rd, (${rs1})", (C_LWSP GPRC:$rd, SP:$rs1, 0)>;
+def : InstAlias<"c.swsp $rs2, (${rs1})", (C_SWSP GPRC:$rs2, SP:$rs1, 0)>;
+}
-let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
-def : InstAlias<"c.flwsp $rd, (${rs1})", (C_FLWSP FPR32C:$rd, SP:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, IsRV64] in
+let Predicates = [HasStdExtC, IsRV64] in {
+def : InstAlias<"c.ld $rd, (${rs1})", (C_LD GPRC:$rd, GPRC:$rs1, 0)>;
+def : InstAlias<"c.sd $rs2, (${rs1})", (C_SD GPRC:$rs2, GPRC:$rs1, 0)>;
def : InstAlias<"c.ldsp $rd, (${rs1})", (C_LDSP GPRC:$rd, SP:$rs1, 0)>;
+def : InstAlias<"c.sdsp $rs2, (${rs1})", (C_SDSP GPRC:$rs2, SP:$rs1, 0)>;
+}
-let Predicates = [HasStdExtC, HasStdExtD] in
-def : InstAlias<"c.fsdsp $rs2, (${rs1})", (C_FSDSP FPR64C:$rs2, SP:$rs1, 0)>;
-
-def : InstAlias<"c.swsp $rs2, (${rs1})", (C_SWSP GPRC:$rs2, SP:$rs1, 0)>;
-
-let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in
+let Predicates = [HasStdExtC, HasStdExtF, IsRV32] in {
+def : InstAlias<"c.flw $rd, (${rs1})", (C_FLW FPR32C:$rd, GPRC:$rs1, 0)>;
+def : InstAlias<"c.fsw $rs2, (${rs1})", (C_FSW FPR32C:$rs2, GPRC:$rs1, 0)>;
+def : InstAlias<"c.flwsp $rd, (${rs1})", (C_FLWSP FPR32C:$rd, SP:$rs1, 0)>;
def : InstAlias<"c.fswsp $rs2, (${rs1})", (C_FSWSP FPR32C:$rs2, SP:$rs1, 0)>;
+}
-let Predicates = [HasStdExtC, IsRV64] in
-def : InstAlias<"c.sdsp $rs2, (${rs1})", (C_SDSP GPRC:$rs2, SP:$rs1, 0)>;
+let Predicates = [HasStdExtC, HasStdExtD] in {
+def : InstAlias<"c.fld $rd, (${rs1})", (C_FLD FPR64C:$rd, GPRC:$rs1, 0)>;
+def : InstAlias<"c.fsd $rs2, (${rs1})", (C_FSD FPR64C:$rs2, GPRC:$rs1, 0)>;
+def : InstAlias<"c.fldsp $rd, (${rs1})", (C_FLDSP FPR64C:$rd, SP:$rs1, 0)>;
+def : InstAlias<"c.fsdsp $rs2, (${rs1})", (C_FSDSP FPR64C:$rs2, SP:$rs1, 0)>;
}
+} // EmitPriority = 0
-//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
// Compress Instruction tablegen backend.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h
index 087646fb5ed9..4b2a403c5c5b 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVTargetMachine.h
@@ -44,8 +44,7 @@ public:
TargetTransformInfo getTargetTransformInfo(const Function &F) const override;
- virtual bool isNoopAddrSpaceCast(unsigned SrcAS,
- unsigned DstAS) const override;
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DstAS) const override;
yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const override;
yaml::MachineFunctionInfo *
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
index 93ffa9847f06..db0936f3f56b 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -81,25 +81,22 @@ public:
/// Particular to z/OS when in 64 bit mode
class SystemZXPLINK64Registers : public SystemZCallingConventionRegisters {
public:
- int getReturnFunctionAddressRegister() override final {
- return SystemZ::R7D;
- };
+ int getReturnFunctionAddressRegister() final { return SystemZ::R7D; };
- int getStackPointerRegister() override final { return SystemZ::R4D; };
+ int getStackPointerRegister() final { return SystemZ::R4D; };
- int getFramePointerRegister() override final { return SystemZ::R8D; };
+ int getFramePointerRegister() final { return SystemZ::R8D; };
int getAddressOfCalleeRegister() { return SystemZ::R6D; };
- const MCPhysReg *
- getCalleeSavedRegs(const MachineFunction *MF) const override final;
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const final;
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
- CallingConv::ID CC) const override final;
+ CallingConv::ID CC) const final;
- int getCallFrameSize() override final { return 128; }
+ int getCallFrameSize() final { return 128; }
- int getStackPointerBias() override final { return 2048; }
+ int getStackPointerBias() final { return 2048; }
/// Destroys the object. Bogus destructor overriding base class destructor
~SystemZXPLINK64Registers() = default;
@@ -109,23 +106,20 @@ public:
/// Particular when on zLinux in 64 bit mode
class SystemZELFRegisters : public SystemZCallingConventionRegisters {
public:
- int getReturnFunctionAddressRegister() override final {
- return SystemZ::R14D;
- };
+ int getReturnFunctionAddressRegister() final { return SystemZ::R14D; };
- int getStackPointerRegister() override final { return SystemZ::R15D; };
+ int getStackPointerRegister() final { return SystemZ::R15D; };
- int getFramePointerRegister() override final { return SystemZ::R11D; };
+ int getFramePointerRegister() final { return SystemZ::R11D; };
- const MCPhysReg *
- getCalleeSavedRegs(const MachineFunction *MF) const override final;
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const final;
const uint32_t *getCallPreservedMask(const MachineFunction &MF,
- CallingConv::ID CC) const override final;
+ CallingConv::ID CC) const final;
- int getCallFrameSize() override final { return SystemZMC::ELFCallFrameSize; }
+ int getCallFrameSize() final { return SystemZMC::ELFCallFrameSize; }
- int getStackPointerBias() override final { return 0; }
+ int getStackPointerBias() final { return 0; }
/// Destroys the object. Bogus destructor overriding base class destructor
~SystemZELFRegisters() = default;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 2636acaf1604..ab6d6b4f7ef1 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -577,8 +577,9 @@ LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB,
CallParams.removeOperand(0);
// For funcrefs, call_indirect is done through __funcref_call_table and the
- // funcref is always installed in slot 0 of the table, therefore instead of having
- // the function pointer added at the end of the params list, a zero (the index in
+ // funcref is always installed in slot 0 of the table, therefore instead of
+ // having the function pointer added at the end of the params list, a zero
+ // (the index in
// __funcref_call_table is added).
if (IsFuncrefCall) {
Register RegZero =
@@ -1156,7 +1157,7 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
// doesn't at MO_GOT which is not needed for direct calls.
- GlobalAddressSDNode* GA = cast<GlobalAddressSDNode>(Callee);
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
getPointerTy(DAG.getDataLayout()),
GA->getOffset());
@@ -1719,20 +1720,12 @@ WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
const GlobalValue *GV = GA->getGlobal();
- // Currently Emscripten does not support dynamic linking with threads.
- // Therefore, if we have thread-local storage, only the local-exec model
- // is possible.
- // TODO: remove this and implement proper TLS models once Emscripten
- // supports dynamic linking with threads.
- if (GV->getThreadLocalMode() != GlobalValue::LocalExecTLSModel &&
- !Subtarget->getTargetTriple().isOSEmscripten()) {
- report_fatal_error("only -ftls-model=local-exec is supported for now on "
- "non-Emscripten OSes: variable " +
- GV->getName(),
- false);
- }
-
- auto model = GV->getThreadLocalMode();
+ // Currently only Emscripten supports dynamic linking with threads. Therefore,
+ // on other targets, if we have thread-local storage, only the local-exec
+ // model is possible.
+ auto model = Subtarget->getTargetTriple().isOSEmscripten()
+ ? GV->getThreadLocalMode()
+ : GlobalValue::LocalExecTLSModel;
// Unsupported TLS modes
assert(model != GlobalValue::NotThreadLocal);
@@ -1791,8 +1784,7 @@ SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
if (GV->getValueType()->isFunctionTy()) {
BaseName = MF.createExternalSymbolName("__table_base");
OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
- }
- else {
+ } else {
BaseName = MF.createExternalSymbolName("__memory_base");
OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
}
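
The TLS change above replaces a hard error with a silent downgrade: on non-Emscripten targets every thread-local access is now lowered as if the local-exec model had been requested, since those targets do not support dynamic linking with threads. The sketch below is only an illustration of that selection logic; TLSModel and the IsEmscripten flag stand in for the real GlobalValue::ThreadLocalMode and Triple::isOSEmscripten().

    // Illustrative stand-ins, not the real LLVM types.
    enum class TLSModel { GeneralDynamic, LocalDynamic, InitialExec, LocalExec };

    static TLSModel selectTLSModel(TLSModel Requested, bool IsEmscripten) {
      // Emscripten can honour the requested model; everything else is forced
      // to local-exec instead of reporting a fatal error as the old code did.
      return IsEmscripten ? Requested : TLSModel::LocalExec;
    }

    int main() {
      return selectTLSModel(TLSModel::GeneralDynamic, /*IsEmscripten=*/false) ==
                     TLSModel::LocalExec
                 ? 0
                 : 1;
    }
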
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5a4533c4bac4..b080ab7e138c 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1041,6 +1041,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::SMULO, MVT::v16i8, Custom);
setOperationAction(ISD::UMULO, MVT::v16i8, Custom);
+ setOperationAction(ISD::UMULO, MVT::v2i32, Custom);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
@@ -1255,6 +1256,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
+ setOperationAction(ISD::SMULO, MVT::v2i32, Custom);
// We directly match byte blends in the backend as they match the VSELECT
// condition form.
@@ -19302,6 +19304,44 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
return false;
}
+static bool canCombineAsMaskOperation(SDValue V1, SDValue V2,
+ const X86Subtarget &Subtarget) {
+ if (!Subtarget.hasAVX512())
+ return false;
+
+ MVT VT = V1.getSimpleValueType().getScalarType();
+ if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
+ return false;
+
+ // i8 is better to be widen to i16, because there is PBLENDW for vXi16
+ // when the vector bit size is 128 or 256.
+ if (VT == MVT::i8 && V1.getSimpleValueType().getSizeInBits() < 512)
+ return false;
+
+ auto HasMaskOperation = [&](SDValue V) {
+ // TODO: Currently we only check limited opcode. We probably extend
+ // it to all binary operation by checking TLI.isBinOp().
+ switch (V->getOpcode()) {
+ default:
+ return false;
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::AND:
+ case ISD::XOR:
+ break;
+ }
+ if (!V->hasOneUse())
+ return false;
+
+ return true;
+ };
+
+ if (HasMaskOperation(V1) || HasMaskOperation(V2))
+ return true;
+
+ return false;
+}
+
// Forward declaration.
static SDValue canonicalizeShuffleMaskWithHorizOp(
MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
@@ -19377,6 +19417,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
// integers to handle flipping the low and high halves of AVX 256-bit vectors.
SmallVector<int, 16> WidenedMask;
if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
+ !canCombineAsMaskOperation(V1, V2, Subtarget) &&
canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
// Shuffle mask widening should not interfere with a broadcast opportunity
// by obfuscating the operands with bitcasts.
@@ -32379,6 +32420,43 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(Res);
return;
}
+ case ISD::SMULO:
+ case ISD::UMULO: {
+ EVT VT = N->getValueType(0);
+ assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
+ VT == MVT::v2i32 && "Unexpected VT!");
+ bool IsSigned = N->getOpcode() == ISD::SMULO;
+ unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
+ SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
+ SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
+ // Extract the high 32 bits from each result using PSHUFD.
+ // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
+ SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
+ Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
+ Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
+ DAG.getIntPtrConstant(0, dl));
+
+ // Truncate the low bits of the result. This will become PSHUFD.
+ Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+
+ SDValue HiCmp;
+ if (IsSigned) {
+ // SMULO overflows if the high bits don't match the sign of the low.
+ HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
+ } else {
+ // UMULO overflows if the high bits are non-zero.
+ HiCmp = DAG.getConstant(0, dl, VT);
+ }
+ SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
+
+ // Widen the result by padding with undef.
+ Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
+ DAG.getUNDEF(VT));
+ Results.push_back(Res);
+ Results.push_back(Ovf);
+ return;
+ }
case X86ISD::VPMADDWD: {
// Legalize types for X86ISD::VPMADDWD by widening.
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
@@ -37522,8 +37600,8 @@ static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
break;
}
if (IsBlend) {
- if (DAG.computeKnownBits(V1, DemandedZeroV1).isZero() &&
- DAG.computeKnownBits(V2, DemandedZeroV2).isZero()) {
+ if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
+ DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
Shuffle = ISD::OR;
SrcVT = DstVT = MaskVT.changeTypeToInteger();
return true;
@@ -41191,7 +41269,7 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
SDValue Src = Op.getOperand(0);
APInt DemandedUpperElts = DemandedElts;
DemandedUpperElts.clearLowBits(1);
- if (TLO.DAG.computeKnownBits(Src, DemandedUpperElts, Depth + 1).isZero())
+ if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
return TLO.CombineTo(Op, Src);
break;
}
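
The v2i32 [SU]MULO expansion above follows the usual widen-and-inspect-the-high-half recipe: multiply in 64 bits, keep the low 32 bits as the product, and derive the overflow flag from the high 32 bits — non-zero for unsigned, different from the sign-replicated low half (the SRA-by-31 comparison) for signed. The same arithmetic for a single scalar lane, as a small self-contained check; the helper names umulo32/smulo32 are invented for this sketch.

    #include <cassert>
    #include <cstdint>

    // Unsigned 32-bit multiply-with-overflow via a 64-bit widen.
    static bool umulo32(uint32_t A, uint32_t B, uint32_t &Lo) {
      uint64_t Full = static_cast<uint64_t>(A) * B;
      Lo = static_cast<uint32_t>(Full);
      uint32_t Hi = static_cast<uint32_t>(Full >> 32);
      return Hi != 0; // overflow iff any high bit is set
    }

    // Signed 32-bit multiply-with-overflow via a 64-bit widen.
    static bool smulo32(int32_t A, int32_t B, int32_t &Lo) {
      int64_t Full = static_cast<int64_t>(A) * B;
      Lo = static_cast<int32_t>(Full);
      int32_t Hi = static_cast<int32_t>(static_cast<uint64_t>(Full) >> 32);
      // Overflow iff the high half differs from the sign of the low half,
      // i.e. from Lo arithmetically shifted right by 31.
      return Hi != (Lo >> 31);
    }

    int main() {
      uint32_t ULo;
      assert(!umulo32(3, 5, ULo) && ULo == 15);
      assert(umulo32(0x80000000u, 2, ULo)); // 2^32 overflows
      int32_t SLo;
      assert(!smulo32(-4, 6, SLo) && SLo == -24);
      assert(smulo32(0x40000000, 2, SLo)); // 2^31 overflows signed
      return 0;
    }
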
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
index af110884049b..85e5d0ba4c34 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1409,7 +1409,7 @@ namespace llvm {
Register
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
- virtual bool needsFixedCatchObjects() const override;
+ bool needsFixedCatchObjects() const override;
/// This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
index 98da00c39bdb..81729e3618d8 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrInfo.h
@@ -544,7 +544,7 @@ public:
ArrayRef<std::pair<unsigned, const char *>>
getSerializableDirectMachineOperandTargetFlags() const override;
- virtual outliner::OutlinedFunction getOutliningCandidateInfo(
+ outliner::OutlinedFunction getOutliningCandidateInfo(
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
diff --git a/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 1fd8b88dd776..35adaa3bde65 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -31,6 +31,7 @@
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -427,27 +428,73 @@ static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
return true;
}
+/// Try to replace a mathlib call to sqrt with the LLVM intrinsic. This avoids
+/// pessimistic codegen that has to account for setting errno and can enable
+/// vectorization.
+static bool
+foldSqrt(Instruction &I, TargetTransformInfo &TTI, TargetLibraryInfo &TLI) {
+ // Match a call to sqrt mathlib function.
+ auto *Call = dyn_cast<CallInst>(&I);
+ if (!Call)
+ return false;
+
+ Module *M = Call->getModule();
+ LibFunc Func;
+ if (!TLI.getLibFunc(*Call, Func) || !isLibFuncEmittable(M, &TLI, Func))
+ return false;
+
+ if (Func != LibFunc_sqrt && Func != LibFunc_sqrtf && Func != LibFunc_sqrtl)
+ return false;
+
+ // If (1) this is a sqrt libcall, (2) we can assume that NAN is not created,
+ // and (3) we would not end up lowering to a libcall anyway (which could
+ // change the value of errno), then:
+ // (1) the operand arg must not be less than -0.0.
+ // (2) errno won't be set.
+ // (3) it is safe to convert this to an intrinsic call.
+ // TODO: Check if the arg is known non-negative.
+ Type *Ty = Call->getType();
+ if (TTI.haveFastSqrt(Ty) && Call->hasNoNaNs()) {
+ IRBuilder<> Builder(&I);
+ IRBuilderBase::FastMathFlagGuard Guard(Builder);
+ Builder.setFastMathFlags(Call->getFastMathFlags());
+
+ Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, Ty);
+ Value *NewSqrt = Builder.CreateCall(Sqrt, Call->getArgOperand(0), "sqrt");
+ I.replaceAllUsesWith(NewSqrt);
+
+ // Explicitly erase the old call because a call with side effects is not
+ // trivially dead.
+ I.eraseFromParent();
+ return true;
+ }
+
+ return false;
+}
+
/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
- TargetTransformInfo &TTI) {
+ TargetTransformInfo &TTI,
+ TargetLibraryInfo &TLI) {
bool MadeChange = false;
for (BasicBlock &BB : F) {
// Ignore unreachable basic blocks.
if (!DT.isReachableFromEntry(&BB))
continue;
- // Do not delete instructions under here and invalidate the iterator.
+
// Walk the block backwards for efficiency. We're matching a chain of
// use->defs, so we're more likely to succeed by starting from the bottom.
// Also, we want to avoid matching partial patterns.
// TODO: It would be more efficient if we removed dead instructions
// iteratively in this loop rather than waiting until the end.
- for (Instruction &I : llvm::reverse(BB)) {
+ for (Instruction &I : make_early_inc_range(llvm::reverse(BB))) {
MadeChange |= foldAnyOrAllBitsSet(I);
MadeChange |= foldGuardedFunnelShift(I, DT);
MadeChange |= tryToRecognizePopCount(I);
MadeChange |= tryToFPToSat(I, TTI);
+ MadeChange |= foldSqrt(I, TTI, TLI);
}
}
@@ -467,7 +514,7 @@ static bool runImpl(Function &F, AssumptionCache &AC, TargetTransformInfo &TTI,
const DataLayout &DL = F.getParent()->getDataLayout();
TruncInstCombine TIC(AC, TLI, DL, DT);
MadeChange |= TIC.run(F);
- MadeChange |= foldUnusualPatterns(F, DT, TTI);
+ MadeChange |= foldUnusualPatterns(F, DT, TTI, TLI);
return MadeChange;
}
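
The motivation for foldSqrt above is that a libm sqrt call has an observable side effect the optimizer must respect: for a negative argument it may set errno (a domain error), which is what forces the pessimistic codegen and blocks vectorization. Once the call carries the no-NaNs assumption and the target has a fast sqrt, that side effect can be ignored and the errno-free llvm.sqrt intrinsic used instead. A small C++ illustration of the errno behaviour being avoided — note that whether errno is actually set is implementation-defined (it depends on math_errhandling), so this is only indicative:

    #include <cerrno>
    #include <cmath>
    #include <cstdio>

    int main() {
      errno = 0;
      double R = std::sqrt(-1.0); // domain error: result is NaN
      // On implementations where (math_errhandling & MATH_ERRNO) is set,
      // errno is now EDOM; this is the side effect the libcall form must
      // preserve and the intrinsic form does not.
      std::printf("sqrt(-1.0) = %f, errno == EDOM: %d\n", R, errno == EDOM);
      return 0;
    }
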
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 62cfc3294968..8c77b6937737 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -249,7 +249,8 @@ doPromotion(Function *F, FunctionAnalysisManager &FAM,
{LLVMContext::MD_range, LLVMContext::MD_nonnull,
LLVMContext::MD_dereferenceable,
LLVMContext::MD_dereferenceable_or_null,
- LLVMContext::MD_align, LLVMContext::MD_noundef});
+ LLVMContext::MD_align, LLVMContext::MD_noundef,
+ LLVMContext::MD_nontemporal});
}
Args.push_back(LI);
ArgAttrVec.push_back(AttributeSet());
@@ -631,8 +632,7 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
// Sort parts by offset.
append_range(ArgPartsVec, ArgParts);
- sort(ArgPartsVec,
- [](const auto &A, const auto &B) { return A.first < B.first; });
+ sort(ArgPartsVec, llvm::less_first());
// Make sure the parts are non-overlapping.
int64_t Offset = ArgPartsVec[0].first;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 660ff3ee9563..83252fec3ea8 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -3328,7 +3328,7 @@ struct AANoAliasReturned final : AANoAliasImpl {
}
/// See AbstractAttribute::updateImpl(...).
- virtual ChangeStatus updateImpl(Attributor &A) override {
+ ChangeStatus updateImpl(Attributor &A) override {
auto CheckReturnValue = [&](Value &RV) -> bool {
if (Constant *C = dyn_cast<Constant>(&RV))
@@ -3427,7 +3427,7 @@ struct AAIsDeadValueImpl : public AAIsDead {
}
/// See AbstractAttribute::getAsStr().
- virtual const std::string getAsStr() const override {
+ const std::string getAsStr() const override {
return isAssumedDead() ? "assumed-dead" : "assumed-live";
}
@@ -4500,9 +4500,8 @@ struct AAAlignImpl : AAAlign {
// to avoid making the alignment explicit if it did not improve.
/// See AbstractAttribute::getDeducedAttributes
- virtual void
- getDeducedAttributes(LLVMContext &Ctx,
- SmallVectorImpl<Attribute> &Attrs) const override {
+ void getDeducedAttributes(LLVMContext &Ctx,
+ SmallVectorImpl<Attribute> &Attrs) const override {
if (getAssumedAlign() > 1)
Attrs.emplace_back(
Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
@@ -4709,7 +4708,7 @@ struct AANoReturnImpl : public AANoReturn {
}
/// See AbstractAttribute::updateImpl(Attributor &A).
- virtual ChangeStatus updateImpl(Attributor &A) override {
+ ChangeStatus updateImpl(Attributor &A) override {
auto CheckForNoReturn = [](Instruction &) { return false; };
bool UsedAssumedInformation = false;
if (!A.checkForAllInstructions(CheckForNoReturn, *this,
@@ -4972,9 +4971,8 @@ struct AANoCaptureImpl : public AANoCapture {
ChangeStatus updateImpl(Attributor &A) override;
/// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
- virtual void
- getDeducedAttributes(LLVMContext &Ctx,
- SmallVectorImpl<Attribute> &Attrs) const override {
+ void getDeducedAttributes(LLVMContext &Ctx,
+ SmallVectorImpl<Attribute> &Attrs) const override {
if (!isAssumedNoCaptureMaybeReturned())
return;
@@ -6848,7 +6846,7 @@ struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
: AAPrivatizablePtrImpl(IRP, A) {}
/// See AbstractAttribute::initialize(...).
- virtual void initialize(Attributor &A) override {
+ void initialize(Attributor &A) override {
// TODO: We can privatize more than arguments.
indicatePessimisticFixpoint();
}
@@ -7222,7 +7220,7 @@ struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
: AAMemoryBehaviorImpl(IRP, A) {}
/// See AbstractAttribute::updateImpl(Attributor &A).
- virtual ChangeStatus updateImpl(Attributor &A) override;
+ ChangeStatus updateImpl(Attributor &A) override;
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override {
@@ -7934,7 +7932,7 @@ struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
: AAMemoryLocationImpl(IRP, A) {}
/// See AbstractAttribute::updateImpl(Attributor &A).
- virtual ChangeStatus updateImpl(Attributor &A) override {
+ ChangeStatus updateImpl(Attributor &A) override {
const auto &MemBehaviorAA =
A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
@@ -9332,13 +9330,13 @@ struct AANoUndefCallSiteReturned final
struct AACallEdgesImpl : public AACallEdges {
AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
- virtual const SetVector<Function *> &getOptimisticEdges() const override {
+ const SetVector<Function *> &getOptimisticEdges() const override {
return CalledFunctions;
}
- virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
+ bool hasUnknownCallee() const override { return HasUnknownCallee; }
- virtual bool hasNonAsmUnknownCallee() const override {
+ bool hasNonAsmUnknownCallee() const override {
return HasUnknownCalleeNonAsm;
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 49077f92884f..50710eaa1b57 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -931,10 +931,9 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
// a value can't capture arguments. Don't analyze them.
if (F->onlyReadsMemory() && F->doesNotThrow() &&
F->getReturnType()->isVoidTy()) {
- for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
- ++A) {
- if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
- A->addAttr(Attribute::NoCapture);
+ for (Argument &A : F->args()) {
+ if (A.getType()->isPointerTy() && !A.hasNoCaptureAttr()) {
+ A.addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed.insert(F);
}
@@ -942,44 +941,43 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
continue;
}
- for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
- ++A) {
- if (!A->getType()->isPointerTy())
+ for (Argument &A : F->args()) {
+ if (!A.getType()->isPointerTy())
continue;
bool HasNonLocalUses = false;
- if (!A->hasNoCaptureAttr()) {
+ if (!A.hasNoCaptureAttr()) {
ArgumentUsesTracker Tracker(SCCNodes);
- PointerMayBeCaptured(&*A, &Tracker);
+ PointerMayBeCaptured(&A, &Tracker);
if (!Tracker.Captured) {
if (Tracker.Uses.empty()) {
// If it's trivially not captured, mark it nocapture now.
- A->addAttr(Attribute::NoCapture);
+ A.addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed.insert(F);
} else {
// If it's not trivially captured and not trivially not captured,
// then it must be calling into another function in our SCC. Save
// its particulars for Argument-SCC analysis later.
- ArgumentGraphNode *Node = AG[&*A];
+ ArgumentGraphNode *Node = AG[&A];
for (Argument *Use : Tracker.Uses) {
Node->Uses.push_back(AG[Use]);
- if (Use != &*A)
+ if (Use != &A)
HasNonLocalUses = true;
}
}
}
// Otherwise, it's captured. Don't bother doing SCC analysis on it.
}
- if (!HasNonLocalUses && !A->onlyReadsMemory()) {
+ if (!HasNonLocalUses && !A.onlyReadsMemory()) {
// Can we determine that it's readonly/readnone/writeonly without doing
// an SCC? Note that we don't allow any calls at all here, or else our
// result will be dependent on the iteration order through the
// functions in the SCC.
SmallPtrSet<Argument *, 8> Self;
- Self.insert(&*A);
- Attribute::AttrKind R = determinePointerAccessAttrs(&*A, Self);
+ Self.insert(&A);
+ Attribute::AttrKind R = determinePointerAccessAttrs(&A, Self);
if (R != Attribute::None)
- if (addAccessAttr(A, R))
+ if (addAccessAttr(&A, R))
Changed.insert(F);
}
}
@@ -1017,12 +1015,10 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
}
bool SCCCaptured = false;
- for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
- I != E && !SCCCaptured; ++I) {
- ArgumentGraphNode *Node = *I;
- if (Node->Uses.empty()) {
- if (!Node->Definition->hasNoCaptureAttr())
- SCCCaptured = true;
+ for (ArgumentGraphNode *Node : ArgumentSCC) {
+ if (Node->Uses.empty() && !Node->Definition->hasNoCaptureAttr()) {
+ SCCCaptured = true;
+ break;
}
}
if (SCCCaptured)
@@ -1035,9 +1031,7 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
ArgumentSCCNodes.insert(I->Definition);
}
- for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
- I != E && !SCCCaptured; ++I) {
- ArgumentGraphNode *N = *I;
+ for (ArgumentGraphNode *N : ArgumentSCC) {
for (ArgumentGraphNode *Use : N->Uses) {
Argument *A = Use->Definition;
if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
@@ -1045,12 +1039,14 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
SCCCaptured = true;
break;
}
+ if (SCCCaptured)
+ break;
}
if (SCCCaptured)
continue;
- for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
- Argument *A = ArgumentSCC[i]->Definition;
+ for (ArgumentGraphNode *N : ArgumentSCC) {
+ Argument *A = N->Definition;
A->addAttr(Attribute::NoCapture);
++NumNoCapture;
Changed.insert(A->getParent());
@@ -1078,16 +1074,17 @@ static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
};
Attribute::AttrKind AccessAttr = Attribute::ReadNone;
- for (unsigned i = 0, e = ArgumentSCC.size();
- i != e && AccessAttr != Attribute::None; ++i) {
- Argument *A = ArgumentSCC[i]->Definition;
+ for (ArgumentGraphNode *N : ArgumentSCC) {
+ Argument *A = N->Definition;
Attribute::AttrKind K = determinePointerAccessAttrs(A, ArgumentSCCNodes);
AccessAttr = meetAccessAttr(AccessAttr, K);
+ if (AccessAttr == Attribute::None)
+ break;
}
if (AccessAttr != Attribute::None) {
- for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
- Argument *A = ArgumentSCC[i]->Definition;
+ for (ArgumentGraphNode *N : ArgumentSCC) {
+ Argument *A = N->Definition;
if (addAccessAttr(A, AccessAttr))
Changed.insert(A->getParent());
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index ec26db8bfc0b..6df0409256bb 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -470,8 +470,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Sort by offset.
SmallVector<std::pair<uint64_t, Type *>, 16> TypesVector;
append_range(TypesVector, Types);
- sort(TypesVector,
- [](const auto &A, const auto &B) { return A.first < B.first; });
+ sort(TypesVector, llvm::less_first());
// Check that the types are non-overlapping.
uint64_t Offset = 0;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 6bf25df101fa..e3e4908f085b 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1778,35 +1778,48 @@ void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
Old->replaceUsesWithIf(New, isDirectCall);
}
+static void dropTypeTests(Module &M, Function &TypeTestFunc) {
+ for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
+ auto *CI = cast<CallInst>(U.getUser());
+ // Find and erase llvm.assume intrinsics for this llvm.type.test call.
+ for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
+ if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
+ Assume->eraseFromParent();
+ // If the assume was merged with another assume, we might have a use on a
+ // phi (which will feed the assume). Simply replace the use on the phi
+ // with "true" and leave the merged assume.
+ if (!CI->use_empty()) {
+ assert(
+ all_of(CI->users(), [](User *U) -> bool { return isa<PHINode>(U); }));
+ CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
+ }
+ CI->eraseFromParent();
+ }
+}
+
bool LowerTypeTestsModule::lower() {
Function *TypeTestFunc =
M.getFunction(Intrinsic::getName(Intrinsic::type_test));
- if (DropTypeTests && TypeTestFunc) {
- for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses())) {
- auto *CI = cast<CallInst>(U.getUser());
- // Find and erase llvm.assume intrinsics for this llvm.type.test call.
- for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
- if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
- Assume->eraseFromParent();
- // If the assume was merged with another assume, we might have a use on a
- // phi (which will feed the assume). Simply replace the use on the phi
- // with "true" and leave the merged assume.
- if (!CI->use_empty()) {
- assert(all_of(CI->users(),
- [](User *U) -> bool { return isa<PHINode>(U); }));
- CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
- }
- CI->eraseFromParent();
+ if (DropTypeTests) {
+ if (TypeTestFunc)
+ dropTypeTests(M, *TypeTestFunc);
+ // Normally we'd have already removed all @llvm.public.type.test calls,
+ // except for in the case where we originally were performing ThinLTO but
+ // decided not to in the backend.
+ Function *PublicTypeTestFunc =
+ M.getFunction(Intrinsic::getName(Intrinsic::public_type_test));
+ if (PublicTypeTestFunc)
+ dropTypeTests(M, *PublicTypeTestFunc);
+ if (TypeTestFunc || PublicTypeTestFunc) {
+ // We have deleted the type intrinsics, so we no longer have enough
+ // information to reason about the liveness of virtual function pointers
+ // in GlobalDCE.
+ for (GlobalVariable &GV : M.globals())
+ GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
+ return true;
}
-
- // We have deleted the type intrinsics, so we no longer have enough
- // information to reason about the liveness of virtual function pointers
- // in GlobalDCE.
- for (GlobalVariable &GV : M.globals())
- GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
-
- return true;
+ return false;
}
// If only some of the modules were split, we cannot correctly perform
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 0b42fc151991..ef2384faa273 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -499,18 +499,6 @@ struct OMPInformationCache : public InformationCache {
}
#include "llvm/Frontend/OpenMP/OMPKinds.def"
- // Remove the `noinline` attribute from `__kmpc`, `_OMP::` and `omp_`
- // functions, except if `optnone` is present.
- if (isOpenMPDevice(M)) {
- for (Function &F : M) {
- for (StringRef Prefix : {"__kmpc", "_ZN4_OMP", "omp_"})
- if (F.hasFnAttribute(Attribute::NoInline) &&
- F.getName().startswith(Prefix) &&
- !F.hasFnAttribute(Attribute::OptimizeNone))
- F.removeFnAttr(Attribute::NoInline);
- }
- }
-
// TODO: We should attach the attributes defined in OMPKinds.def.
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/SCCP.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/SCCP.cpp
index 26fb7d676429..0453af184a72 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/SCCP.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/SCCP.cpp
@@ -148,7 +148,7 @@ struct FunctionSpecializationLegacyPass : public ModulePass {
AU.addRequired<TargetTransformInfoWrapperPass>();
}
- virtual bool runOnModule(Module &M) override {
+ bool runOnModule(Module &M) override {
if (skipModule(M))
return false;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index a360a768a2bc..ef7af551a328 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -132,6 +132,14 @@ void promoteTypeIds(Module &M, StringRef ModuleId) {
}
}
+ if (Function *PublicTypeTestFunc =
+ M.getFunction(Intrinsic::getName(Intrinsic::public_type_test))) {
+ for (const Use &U : PublicTypeTestFunc->uses()) {
+ auto CI = cast<CallInst>(U.getUser());
+ ExternalizeTypeId(CI, 1);
+ }
+ }
+
if (Function *TypeCheckedLoadFunc =
M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load))) {
for (const Use &U : TypeCheckedLoadFunc->uses()) {
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index ad00c116ce0a..18efe99f7cb4 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -773,15 +773,14 @@ PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
return PreservedAnalyses::none();
}
+namespace llvm {
// Enable whole program visibility if enabled by client (e.g. linker) or
// internal option, and not force disabled.
-static bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) {
+bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) {
return (WholeProgramVisibilityEnabledInLTO || WholeProgramVisibility) &&
!DisableWholeProgramVisibility;
}
-namespace llvm {
-
/// If whole program visibility asserted, then upgrade all public vcall
/// visibility metadata on vtable definitions to linkage unit visibility in
/// Module IR (for regular or hybrid LTO).
@@ -790,7 +789,7 @@ void updateVCallVisibilityInModule(
const DenseSet<GlobalValue::GUID> &DynamicExportSymbols) {
if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
return;
- for (GlobalVariable &GV : M.globals())
+ for (GlobalVariable &GV : M.globals()) {
// Add linkage unit visibility to any variable with type metadata, which are
// the vtable definitions. We won't have an existing vcall_visibility
// metadata on vtable definitions with public visibility.
@@ -800,6 +799,34 @@ void updateVCallVisibilityInModule(
// linker, as we have no information on their eventual use.
!DynamicExportSymbols.count(GV.getGUID()))
GV.setVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit);
+ }
+}
+
+void updatePublicTypeTestCalls(Module &M,
+ bool WholeProgramVisibilityEnabledInLTO) {
+ Function *PublicTypeTestFunc =
+ M.getFunction(Intrinsic::getName(Intrinsic::public_type_test));
+ if (!PublicTypeTestFunc)
+ return;
+ if (hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO)) {
+ Function *TypeTestFunc =
+ Intrinsic::getDeclaration(&M, Intrinsic::type_test);
+ for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) {
+ auto *CI = cast<CallInst>(U.getUser());
+ auto *NewCI = CallInst::Create(
+ TypeTestFunc, {CI->getArgOperand(0), CI->getArgOperand(1)}, None, "",
+ CI);
+ CI->replaceAllUsesWith(NewCI);
+ CI->eraseFromParent();
+ }
+ } else {
+ auto *True = ConstantInt::getTrue(M.getContext());
+ for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) {
+ auto *CI = cast<CallInst>(U.getUser());
+ CI->replaceAllUsesWith(True);
+ CI->eraseFromParent();
+ }
+ }
}
/// If whole program visibility asserted, then upgrade all public vcall
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 827b25533513..664226ec187b 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -597,10 +597,9 @@ public:
/// demanded bits.
bool SimplifyDemandedInstructionBits(Instruction &Inst);
- virtual Value *
- SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
- unsigned Depth = 0,
- bool AllowMultipleUsers = false) override;
+ Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
+ APInt &UndefElts, unsigned Depth = 0,
+ bool AllowMultipleUsers = false) override;
/// Canonicalize the position of binops relative to shufflevector.
Instruction *foldVectorBinop(BinaryOperator &Inst);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index cf2754b1dd60..3274e36ab71a 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1232,7 +1232,9 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
// dynamic alloca instrumentation for them as well.
!AI.isUsedWithInAlloca() &&
// swifterror allocas are register promoted by ISel
- !AI.isSwiftError());
+ !AI.isSwiftError() &&
+ // safe allocas are not interesting
+ !(SSGI && SSGI->isSafe(AI)));
ProcessedAllocas[&AI] = IsInteresting;
return IsInteresting;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index fd2eaee8b47d..013a119c5096 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -213,10 +213,12 @@ bool LoopDataPrefetchLegacyPass::runOnFunction(Function &F) {
bool LoopDataPrefetch::run() {
// If PrefetchDistance is not set, don't run the pass. This gives an
// opportunity for targets to run this pass for selected subtargets only
- // (whose TTI sets PrefetchDistance).
- if (getPrefetchDistance() == 0)
+ // (whose TTI sets PrefetchDistance and CacheLineSize).
+ if (getPrefetchDistance() == 0 || TTI->getCacheLineSize() == 0) {
+ LLVM_DEBUG(dbgs() << "Please set both PrefetchDistance and CacheLineSize "
+ "for loop data prefetch.\n");
return false;
- assert(TTI->getCacheLineSize() && "Cache line size is not set for target");
+ }
bool MadeChange = false;
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index c05906649f16..f1e1359255bd 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -338,6 +338,9 @@ class LowerMatrixIntrinsics {
Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
IRBuilder<> &Builder) const {
Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
+ assert(cast<FixedVectorType>(Vec->getType())->getNumElements() >=
+ NumElts &&
+ "Extracted vector will contain poison values");
return Builder.CreateShuffleVector(
Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
"block");
@@ -1423,13 +1426,13 @@ public:
FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
MatrixTy TileResult;
// Insert in the inner loop header.
- Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator());
+ Builder.SetInsertPoint(TI.KLoop.Header->getTerminator());
// Create PHI nodes for the result columns to accumulate across iterations.
SmallVector<PHINode *, 4> ColumnPhis;
for (unsigned I = 0; I < TileSize; I++) {
auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
- TI.RowLoopHeader->getSingleSuccessor());
+ TI.RowLoop.Header->getSingleSuccessor());
TileResult.addVector(Phi);
ColumnPhis.push_back(Phi);
}
@@ -1438,27 +1441,29 @@ public:
// Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
Builder.SetInsertPoint(InnerBody->getTerminator());
// Load tiles of the operands.
- MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK,
- {TileSize, TileSize}, EltType, Builder);
- MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol,
- {TileSize, TileSize}, EltType, Builder);
+ MatrixTy A =
+ loadMatrix(LPtr, {}, false, LShape, TI.RowLoop.Index, TI.KLoop.Index,
+ {TileSize, TileSize}, EltType, Builder);
+ MatrixTy B =
+ loadMatrix(RPtr, {}, false, RShape, TI.KLoop.Index, TI.ColumnLoop.Index,
+ {TileSize, TileSize}, EltType, Builder);
emitMatrixMultiply(TileResult, A, B, Builder, true, false,
getFastMathFlags(MatMul));
// Store result after the inner loop is done.
- Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator());
+ Builder.SetInsertPoint(TI.RowLoop.Latch->getTerminator());
storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
- TI.CurrentRow, TI.CurrentCol, EltType, Builder);
+ TI.RowLoop.Index, TI.ColumnLoop.Index, EltType, Builder);
for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
- ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch);
+ ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.KLoop.Latch);
// Force unrolling of a few iterations of the inner loop, to make sure there
// is enough work per iteration.
// FIXME: The unroller should make this decision directly instead, but
// currently the cost-model is not up to the task.
unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
- addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader),
+ addStringMetadataToLoop(LI->getLoopFor(TI.KLoop.Header),
"llvm.loop.unroll.count", InnerLoopUnrollCount);
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 240fb5e60687..cd2ce8ce336e 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -147,27 +147,27 @@ XorOpnd::XorOpnd(Value *V) {
/// Instruction::isAssociative() because it includes operations like fsub.
/// (This routine is only intended to be called for floating-point operations.)
static bool hasFPAssociativeFlags(Instruction *I) {
- assert(I && I->getType()->isFPOrFPVectorTy() && "Should only check FP ops");
+ assert(I && isa<FPMathOperator>(I) && "Should only check FP ops");
return I->hasAllowReassoc() && I->hasNoSignedZeros();
}
/// Return true if V is an instruction of the specified opcode and if it
/// only has one use.
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
- auto *I = dyn_cast<Instruction>(V);
- if (I && I->hasOneUse() && I->getOpcode() == Opcode)
- if (!isa<FPMathOperator>(I) || hasFPAssociativeFlags(I))
- return cast<BinaryOperator>(I);
+ auto *BO = dyn_cast<BinaryOperator>(V);
+ if (BO && BO->hasOneUse() && BO->getOpcode() == Opcode)
+ if (!isa<FPMathOperator>(BO) || hasFPAssociativeFlags(BO))
+ return BO;
return nullptr;
}
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
unsigned Opcode2) {
- auto *I = dyn_cast<Instruction>(V);
- if (I && I->hasOneUse() &&
- (I->getOpcode() == Opcode1 || I->getOpcode() == Opcode2))
- if (!isa<FPMathOperator>(I) || hasFPAssociativeFlags(I))
- return cast<BinaryOperator>(I);
+ auto *BO = dyn_cast<BinaryOperator>(V);
+ if (BO && BO->hasOneUse() &&
+ (BO->getOpcode() == Opcode1 || BO->getOpcode() == Opcode2))
+ if (!isa<FPMathOperator>(BO) || hasFPAssociativeFlags(BO))
+ return BO;
return nullptr;
}
@@ -778,7 +778,7 @@ void ReassociatePass::RewriteExprTree(BinaryOperator *I,
Constant *Undef = UndefValue::get(I->getType());
NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode),
Undef, Undef, "", I);
- if (NewOp->getType()->isFPOrFPVectorTy())
+ if (isa<FPMathOperator>(NewOp))
NewOp->setFastMathFlags(I->getFastMathFlags());
} else {
NewOp = NodesToRewrite.pop_back_val();
@@ -2227,7 +2227,7 @@ void ReassociatePass::OptimizeInst(Instruction *I) {
// Don't optimize floating-point instructions unless they have the
// appropriate FastMathFlags for reassociation enabled.
- if (I->getType()->isFPOrFPVectorTy() && !hasFPAssociativeFlags(I))
+ if (isa<FPMathOperator>(I) && !hasFPAssociativeFlags(I))
return;
// Do not reassociate boolean (i1) expressions. We want to preserve the
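
The Reassociate hunks above switch from asking whether an instruction's type is floating point to asking whether the instruction is an FPMathOperator, i.e. whether it can carry fast-math flags at all; a float-typed load or PHI satisfies the former but not the latter. A hedged sketch of the check against the LLVM C++ API (headers and method names assumed as in LLVM 15, compiles only with LLVM available):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// Only FPMathOperators may carry fast-math flags; querying reassociation
// flags on any other FP-typed instruction would be invalid.
static bool canReassociateFP(const Instruction *I) {
  if (!isa<FPMathOperator>(I))
    return false;
  return I->hasAllowReassoc() && I->hasNoSignedZeros();
}
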
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 00387ec426bf..878f9477a29d 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -825,6 +825,35 @@ static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
}
}
+/// Bundle operands of the inlined function must be added to inlined call sites.
+static void PropagateOperandBundles(Function::iterator InlinedBB,
+ Instruction *CallSiteEHPad) {
+ for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
+ CallBase *I = dyn_cast<CallBase>(&II);
+ if (!I)
+ continue;
+ // Skip call sites which already have a "funclet" bundle.
+ if (I->getOperandBundle(LLVMContext::OB_funclet))
+ continue;
+ // Skip call sites which are nounwind intrinsics (as long as they don't
+ // lower into regular function calls in the course of IR transformations).
+ auto *CalledFn =
+ dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
+ if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
+ !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
+ continue;
+
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ I->getOperandBundlesAsDefs(OpBundles);
+ OpBundles.emplace_back("funclet", CallSiteEHPad);
+
+ Instruction *NewInst = CallBase::Create(I, OpBundles, I);
+ NewInst->takeName(I);
+ I->replaceAllUsesWith(NewInst);
+ I->eraseFromParent();
+ }
+}
+
namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
@@ -2304,38 +2333,12 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// Update the lexical scopes of the new funclets and callsites.
// Anything that had 'none' as its parent is now nested inside the callsite's
// EHPad.
-
if (CallSiteEHPad) {
for (Function::iterator BB = FirstNewBlock->getIterator(),
E = Caller->end();
BB != E; ++BB) {
- // Add bundle operands to any top-level call sites.
- SmallVector<OperandBundleDef, 1> OpBundles;
- for (Instruction &II : llvm::make_early_inc_range(*BB)) {
- CallBase *I = dyn_cast<CallBase>(&II);
- if (!I)
- continue;
-
- // Skip call sites which are nounwind intrinsics.
- auto *CalledFn =
- dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
- if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
- continue;
-
- // Skip call sites which already have a "funclet" bundle.
- if (I->getOperandBundle(LLVMContext::OB_funclet))
- continue;
-
- I->getOperandBundlesAsDefs(OpBundles);
- OpBundles.emplace_back("funclet", CallSiteEHPad);
-
- Instruction *NewInst = CallBase::Create(I, OpBundles, I);
- NewInst->takeName(I);
- I->replaceAllUsesWith(NewInst);
- I->eraseFromParent();
-
- OpBundles.clear();
- }
+ // Add bundle operands to inlined call sites.
+ PropagateOperandBundles(BB, CallSiteEHPad);
// It is problematic if the inlinee has a cleanupret which unwinds to
// caller and we inline it into a call site which doesn't unwind but into
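
The InlineFunction hunks above move the funclet-bundle propagation into a helper and tighten the intrinsic exemption: a nounwind intrinsic is only skipped if it can never lower to a real function call. A hedged sketch of just that filter, using LLVM API names as they appear in the patch (assumed headers, illustration only):

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Returns true if an inlined call site still needs a "funclet" operand bundle.
static bool needsFuncletBundle(const CallBase &CB) {
  // Already annotated: nothing to do.
  if (CB.getOperandBundle(LLVMContext::OB_funclet))
    return false;
  // Nounwind intrinsics are exempt only if they can never turn into a real
  // function call later on (the new mayLowerToFunctionCall condition).
  const auto *Callee =
      dyn_cast<Function>(CB.getCalledOperand()->stripPointerCasts());
  if (Callee && Callee->isIntrinsic() && CB.doesNotThrow() &&
      !IntrinsicInst::mayLowerToFunctionCall(Callee->getIntrinsicID()))
    return false;
  return true;
}
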
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/MatrixUtils.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/MatrixUtils.cpp
index 6a137630deeb..e218773cf5da 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/MatrixUtils.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/MatrixUtils.cpp
@@ -70,35 +70,35 @@ BasicBlock *TileInfo::CreateLoop(BasicBlock *Preheader, BasicBlock *Exit,
BasicBlock *TileInfo::CreateTiledLoops(BasicBlock *Start, BasicBlock *End,
IRBuilderBase &B, DomTreeUpdater &DTU,
LoopInfo &LI) {
- Loop *ColLoop = LI.AllocateLoop();
- Loop *RowLoop = LI.AllocateLoop();
- Loop *InnerLoop = LI.AllocateLoop();
- RowLoop->addChildLoop(InnerLoop);
- ColLoop->addChildLoop(RowLoop);
+ Loop *ColumnLoopInfo = LI.AllocateLoop();
+ Loop *RowLoopInfo = LI.AllocateLoop();
+ Loop *KLoopInfo = LI.AllocateLoop();
+ RowLoopInfo->addChildLoop(KLoopInfo);
+ ColumnLoopInfo->addChildLoop(RowLoopInfo);
if (Loop *ParentL = LI.getLoopFor(Start))
- ParentL->addChildLoop(ColLoop);
+ ParentL->addChildLoop(ColumnLoopInfo);
else
- LI.addTopLevelLoop(ColLoop);
+ LI.addTopLevelLoop(ColumnLoopInfo);
BasicBlock *ColBody =
CreateLoop(Start, End, B.getInt64(NumColumns), B.getInt64(TileSize),
- "cols", B, DTU, ColLoop, LI);
- BasicBlock *ColLatch = ColBody->getSingleSuccessor();
+ "cols", B, DTU, ColumnLoopInfo, LI);
+ ColumnLoop.Latch = ColBody->getSingleSuccessor();
BasicBlock *RowBody =
- CreateLoop(ColBody, ColLatch, B.getInt64(NumRows), B.getInt64(TileSize),
- "rows", B, DTU, RowLoop, LI);
- RowLoopLatch = RowBody->getSingleSuccessor();
+ CreateLoop(ColBody, ColumnLoop.Latch, B.getInt64(NumRows),
+ B.getInt64(TileSize), "rows", B, DTU, RowLoopInfo, LI);
+ RowLoop.Latch = RowBody->getSingleSuccessor();
BasicBlock *InnerBody =
- CreateLoop(RowBody, RowLoopLatch, B.getInt64(NumInner),
- B.getInt64(TileSize), "inner", B, DTU, InnerLoop, LI);
- InnerLoopLatch = InnerBody->getSingleSuccessor();
- ColumnLoopHeader = ColBody->getSinglePredecessor();
- RowLoopHeader = RowBody->getSinglePredecessor();
- InnerLoopHeader = InnerBody->getSinglePredecessor();
- CurrentRow = &*RowLoopHeader->begin();
- CurrentCol = &*ColumnLoopHeader->begin();
- CurrentK = &*InnerLoopHeader->begin();
+ CreateLoop(RowBody, RowLoop.Latch, B.getInt64(NumInner),
+ B.getInt64(TileSize), "inner", B, DTU, KLoopInfo, LI);
+ KLoop.Latch = InnerBody->getSingleSuccessor();
+ ColumnLoop.Header = ColBody->getSinglePredecessor();
+ RowLoop.Header = RowBody->getSinglePredecessor();
+ KLoop.Header = InnerBody->getSinglePredecessor();
+ RowLoop.Index = &*RowLoop.Header->begin();
+ ColumnLoop.Index = &*ColumnLoop.Header->begin();
+ KLoop.Index = &*KLoop.Header->begin();
return InnerBody;
}
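
The LowerMatrixIntrinsics and MatrixUtils hunks above are a renaming refactor: the per-loop header, latch and induction-variable members of TileInfo are grouped into one small struct per loop (ColumnLoop, RowLoop, KLoop) instead of flat fields like CurrentRow and RowLoopHeader. A standalone sketch of the resulting shape, with stub types standing in for llvm::BasicBlock and llvm::Value:

struct BasicBlockStub; // stands in for llvm::BasicBlock
struct ValueStub;      // stands in for llvm::Value (the loop's index PHI)

struct LoopBlocks {
  BasicBlockStub *Header = nullptr;
  BasicBlockStub *Latch = nullptr;
  ValueStub *Index = nullptr;
};

struct TileInfoSketch {
  LoopBlocks ColumnLoop; // outermost loop over result columns
  LoopBlocks RowLoop;    // middle loop over result rows
  LoopBlocks KLoop;      // innermost reduction loop over the shared dimension
};
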
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index bca3b0538c5d..03087d8370d5 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -75,39 +75,109 @@ static bool callHasFP128Argument(const CallInst *CI) {
});
}
-static Value *convertStrToNumber(CallInst *CI, StringRef &Str, Value *EndPtr,
- int64_t Base, IRBuilderBase &B) {
+// Convert the entire string Str representing an integer in Base, up to
+// the terminating nul if present, to a constant according to the rules
+// of strtoul[l] or, when AsSigned is set, of strtol[l]. On success
+// return the result, otherwise null.
+// The function assumes the string is encoded in ASCII and carefully
+// avoids converting sequences (including "") that the corresponding
+// library call might fail and set errno for.
+static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr,
+ uint64_t Base, bool AsSigned, IRBuilderBase &B) {
if (Base < 2 || Base > 36)
- // handle special zero base
if (Base != 0)
+ // Fail for an invalid base (required by POSIX).
return nullptr;
- char *End;
- std::string nptr = Str.str();
- errno = 0;
- long long int Result = strtoll(nptr.c_str(), &End, Base);
- if (errno)
- return nullptr;
+ // Strip leading whitespace.
+ for (unsigned i = 0; i != Str.size(); ++i)
+ if (!isSpace((unsigned char)Str[i])) {
+ Str = Str.substr(i);
+ break;
+ }
- // if we assume all possible target locales are ASCII supersets,
- // then if strtoll successfully parses a number on the host,
- // it will also successfully parse the same way on the target
- if (*End != '\0')
+ if (Str.empty())
+ // Fail for empty subject sequences (POSIX allows but doesn't require
+ // strtol[l]/strtoul[l] to fail with EINVAL).
return nullptr;
- if (!isIntN(CI->getType()->getPrimitiveSizeInBits(), Result))
- return nullptr;
+ // Strip but remember the sign.
+ bool Negate = Str[0] == '-';
+ if (Str[0] == '-' || Str[0] == '+') {
+ Str = Str.drop_front();
+ if (Str.empty())
+ // Fail for a sign with nothing after it.
+ return nullptr;
+ }
+
+ // Set Max to the absolute value of the minimum (for signed), or
+ // to the maximum (for unsigned) value representable in the type.
+ Type *RetTy = CI->getType();
+ unsigned NBits = RetTy->getPrimitiveSizeInBits();
+ uint64_t Max = AsSigned && Negate ? 1 : 0;
+ Max += AsSigned ? maxIntN(NBits) : maxUIntN(NBits);
+
+ // Autodetect Base if it's zero and consume the "0x" prefix.
+ if (Str.size() > 1) {
+ if (Str[0] == '0') {
+ if (toUpper((unsigned char)Str[1]) == 'X') {
+ if (Str.size() == 2 || (Base && Base != 16))
+ // Fail if Base doesn't allow the "0x" prefix or for the prefix
+ // alone that implementations like BSD set errno to EINVAL for.
+ return nullptr;
+
+ Str = Str.drop_front(2);
+ Base = 16;
+ }
+ else if (Base == 0)
+ Base = 8;
+ } else if (Base == 0)
+ Base = 10;
+ }
+ else if (Base == 0)
+ Base = 10;
+
+ // Convert the rest of the subject sequence, not including the sign,
+ // to its uint64_t representation (this assumes the source character
+ // set is ASCII).
+ uint64_t Result = 0;
+ for (unsigned i = 0; i != Str.size(); ++i) {
+ unsigned char DigVal = Str[i];
+ if (isDigit(DigVal))
+ DigVal = DigVal - '0';
+ else {
+ DigVal = toUpper(DigVal);
+ if (isAlpha(DigVal))
+ DigVal = DigVal - 'A' + 10;
+ else
+ return nullptr;
+ }
+
+ if (DigVal >= Base)
+ // Fail if the digit is not valid in the Base.
+ return nullptr;
+
+ // Add the digit and fail if the result is not representable in
+ // the (unsigned form of the) destination type.
+ bool VFlow;
+ Result = SaturatingMultiplyAdd(Result, Base, (uint64_t)DigVal, &VFlow);
+ if (VFlow || Result > Max)
+ return nullptr;
+ }
if (EndPtr) {
// Store the pointer to the end.
- uint64_t ILen = End - nptr.c_str();
- Value *Off = B.getInt64(ILen);
+ Value *Off = B.getInt64(Str.size());
Value *StrBeg = CI->getArgOperand(0);
Value *StrEnd = B.CreateInBoundsGEP(B.getInt8Ty(), StrBeg, Off, "endptr");
B.CreateStore(StrEnd, EndPtr);
}
- return ConstantInt::get(CI->getType(), Result);
+ if (Negate)
+ // Unsigned negation doesn't overflow.
+ Result = -Result;
+
+ return ConstantInt::get(RetTy, Result);
}
static bool isOnlyUsedInComparisonWithZero(Value *V) {
@@ -2531,27 +2601,35 @@ Value *LibCallSimplifier::optimizeToAscii(CallInst *CI, IRBuilderBase &B) {
ConstantInt::get(CI->getType(), 0x7F));
}
+// Fold calls to atoi, atol, and atoll.
Value *LibCallSimplifier::optimizeAtoi(CallInst *CI, IRBuilderBase &B) {
+ CI->addParamAttr(0, Attribute::NoCapture);
+
StringRef Str;
if (!getConstantStringInfo(CI->getArgOperand(0), Str))
return nullptr;
- return convertStrToNumber(CI, Str, nullptr, 10, B);
+ return convertStrToInt(CI, Str, nullptr, 10, /*AsSigned=*/true, B);
}
-Value *LibCallSimplifier::optimizeStrtol(CallInst *CI, IRBuilderBase &B) {
- StringRef Str;
- if (!getConstantStringInfo(CI->getArgOperand(0), Str))
- return nullptr;
-
+// Fold calls to strtol, strtoll, strtoul, and strtoull.
+Value *LibCallSimplifier::optimizeStrToInt(CallInst *CI, IRBuilderBase &B,
+ bool AsSigned) {
Value *EndPtr = CI->getArgOperand(1);
- if (isa<ConstantPointerNull>(EndPtr))
+ if (isa<ConstantPointerNull>(EndPtr)) {
+ // With a null EndPtr, this function won't capture the main argument.
+ // It would be readonly too, except that it still may write to errno.
+ CI->addParamAttr(0, Attribute::NoCapture);
EndPtr = nullptr;
- else if (!isKnownNonZero(EndPtr, DL))
+ } else if (!isKnownNonZero(EndPtr, DL))
+ return nullptr;
+
+ StringRef Str;
+ if (!getConstantStringInfo(CI->getArgOperand(0), Str))
return nullptr;
if (ConstantInt *CInt = dyn_cast<ConstantInt>(CI->getArgOperand(2))) {
- return convertStrToNumber(CI, Str, EndPtr, CInt->getSExtValue(), B);
+ return convertStrToInt(CI, Str, EndPtr, CInt->getSExtValue(), AsSigned, B);
}
return nullptr;
@@ -3390,7 +3468,10 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI, IRBuilderBase &Builder) {
return optimizeAtoi(CI, Builder);
case LibFunc_strtol:
case LibFunc_strtoll:
- return optimizeStrtol(CI, Builder);
+ return optimizeStrToInt(CI, Builder, /*AsSigned=*/true);
+ case LibFunc_strtoul:
+ case LibFunc_strtoull:
+ return optimizeStrToInt(CI, Builder, /*AsSigned=*/false);
case LibFunc_printf:
return optimizePrintF(CI, Builder);
case LibFunc_sprintf:
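
The SimplifyLibCalls hunks above replace the old host-strtoll-based folding with an explicit compile-time parser, which also lets strtoul and strtoull be folded. The sketch below is a standalone re-implementation of the same parsing rules (ASCII only, destination widths of 8 to 64 bits assumed), not the LLVM function itself: skip whitespace, remember the sign, autodetect base 8/10/16 when Base is zero, accept only digits valid in the base, and refuse anything that would overflow the destination width.

#include <cctype>
#include <cstdint>
#include <optional>
#include <string_view>

// Returns the parsed value, or std::nullopt where the folder must give up and
// leave the library call alone (invalid base, empty or malformed sequence,
// overflow). The caller truncates to NBits, as ConstantInt::get would.
std::optional<uint64_t> parseStrToInt(std::string_view Str, unsigned Base,
                                      unsigned NBits, bool AsSigned) {
  if (Base != 0 && (Base < 2 || Base > 36))
    return std::nullopt;                           // invalid base (POSIX)
  while (!Str.empty() && std::isspace((unsigned char)Str.front()))
    Str.remove_prefix(1);                          // leading whitespace
  if (Str.empty())
    return std::nullopt;                           // empty subject sequence
  bool Negate = Str.front() == '-';
  if (Str.front() == '-' || Str.front() == '+') {
    Str.remove_prefix(1);
    if (Str.empty())
      return std::nullopt;                         // a sign with nothing after
  }
  // Largest magnitude representable in the destination type.
  uint64_t Max = AsSigned ? (uint64_t(1) << (NBits - 1)) - (Negate ? 0 : 1)
                          : (NBits == 64 ? ~uint64_t(0)
                                         : (uint64_t(1) << NBits) - 1);
  // Consume an optional "0x"/"0X" prefix and autodetect the base.
  if (Str.size() > 1 && Str[0] == '0' && (Str[1] == 'x' || Str[1] == 'X')) {
    if (Str.size() == 2 || (Base && Base != 16))
      return std::nullopt;                         // bare "0x" or wrong base
    Str.remove_prefix(2);
    Base = 16;
  } else if (Base == 0) {
    Base = Str[0] == '0' ? 8 : 10;
  }
  uint64_t Result = 0;
  for (char C : Str) {
    unsigned Dig;
    if (C >= '0' && C <= '9')
      Dig = C - '0';
    else if (std::isalpha((unsigned char)C))
      Dig = std::toupper((unsigned char)C) - 'A' + 10;
    else
      return std::nullopt;
    if (Dig >= Base)
      return std::nullopt;                         // digit not valid in Base
    if (Result > (Max - Dig) / Base)
      return std::nullopt;                         // would overflow
    Result = Result * Base + Dig;
  }
  return Negate ? uint64_t(0) - Result : Result;   // unsigned negation
}

Returning std::nullopt corresponds to the folder leaving the call in place, since the library routine may legitimately set errno at run time for those inputs.
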
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b887ea41676b..238b074089aa 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -798,8 +798,7 @@ public:
// Override this function to handle the more complex control flow around the
// three loops.
- std::pair<BasicBlock *, Value *>
- createVectorizedLoopSkeleton() final override {
+ std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton() final {
return createEpilogueVectorizedLoopSkeleton();
}
@@ -835,8 +834,7 @@ public:
EPI, LVL, CM, BFI, PSI, Check) {}
/// Implements the interface for creating a vectorized skeleton using the
/// *main loop* strategy (ie the first pass of vplan execution).
- std::pair<BasicBlock *, Value *>
- createEpilogueVectorizedLoopSkeleton() final override;
+ std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;
protected:
/// Emits an iteration count bypass check once for the main loop (when \p
@@ -866,8 +864,7 @@ public:
}
/// Implements the interface for creating a vectorized skeleton using the
/// *epilogue loop* strategy (ie the second pass of vplan execution).
- std::pair<BasicBlock *, Value *>
- createEpilogueVectorizedLoopSkeleton() final override;
+ std::pair<BasicBlock *, Value *> createEpilogueVectorizedLoopSkeleton() final;
protected:
/// Emits an iteration count bypass check after the main vector loop has
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index cd044c78d900..d69d1e3d19f3 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -10972,9 +10972,7 @@ public:
It != E; ++It) {
PossibleRedValsVect.emplace_back();
auto RedValsVect = It->second.takeVector();
- stable_sort(RedValsVect, [](const auto &P1, const auto &P2) {
- return P1.second < P2.second;
- });
+ stable_sort(RedValsVect, llvm::less_second());
for (const std::pair<Value *, unsigned> &Data : RedValsVect)
PossibleRedValsVect.back().append(Data.second, Data.first);
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp
index 458a58c12ca7..3e70f460bc58 100644
--- a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp
@@ -8,6 +8,7 @@
#include "DebugInfoLinker.h"
#include "Error.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/DWARFLinker/DWARFLinker.h"
#include "llvm/DWARFLinker/DWARFStreamer.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
@@ -210,8 +211,29 @@ private:
const Options &Opts;
};
-bool linkDebugInfo(object::ObjectFile &File, const Options &Options,
- raw_pwrite_stream &OutStream) {
+static bool knownByDWARFUtil(StringRef SecName) {
+ return llvm::StringSwitch<bool>(SecName)
+ .Case(".debug_info", true)
+ .Case(".debug_types", true)
+ .Case(".debug_abbrev", true)
+ .Case(".debug_loc", true)
+ .Case(".debug_loclists", true)
+ .Case(".debug_frame", true)
+ .Case(".debug_aranges", true)
+ .Case(".debug_ranges", true)
+ .Case(".debug_rnglists", true)
+ .Case(".debug_line", true)
+ .Case(".debug_line_str", true)
+ .Case(".debug_addr", true)
+ .Case(".debug_macro", true)
+ .Case(".debug_macinfo", true)
+ .Case(".debug_str", true)
+ .Case(".debug_str_offsets", true)
+ .Default(false);
+}
+
+Error linkDebugInfo(object::ObjectFile &File, const Options &Options,
+ raw_pwrite_stream &OutStream) {
auto ReportWarn = [&](const Twine &Message, StringRef Context,
const DWARFDie *Die) {
@@ -235,8 +257,11 @@ bool linkDebugInfo(object::ObjectFile &File, const Options &Options,
// Create output streamer.
DwarfStreamer OutStreamer(OutputFileType::Object, OutStream, nullptr,
ReportWarn, ReportWarn);
- if (!OutStreamer.init(File.makeTriple(), ""))
- return false;
+ Triple TargetTriple = File.makeTriple();
+ if (!OutStreamer.init(TargetTriple, formatv("cannot create a stream for {0}",
+ TargetTriple.getTriple())
+ .str()))
+ return createStringError(std::errc::invalid_argument, "");
// Create DWARF linker.
DWARFLinker DebugInfoLinker(&OutStreamer, DwarfLinkerClient::LLD);
@@ -256,6 +281,16 @@ bool linkDebugInfo(object::ObjectFile &File, const Options &Options,
std::unique_ptr<DWARFContext> Context = DWARFContext::create(File);
+ // Unknown debug sections would be removed. Display warning
+ // for such sections.
+ for (SectionName Sec : Context->getDWARFObj().getSectionNames()) {
+ if (isDebugSection(Sec.Name) && !knownByDWARFUtil(Sec.Name))
+ warning(
+ formatv("'{0}' is not currently supported: section will be skipped",
+ Sec.Name),
+ Options.InputFileName);
+ }
+
// Add object files to the DWARFLinker.
AddresssMapForLinking[0] =
std::make_unique<ObjFileAddressMap>(*Context, Options, File);
@@ -268,9 +303,11 @@ bool linkDebugInfo(object::ObjectFile &File, const Options &Options,
DebugInfoLinker.addObjectFile(*ObjectsForLinking[I]);
// Link debug info.
- DebugInfoLinker.link();
+ if (Error Err = DebugInfoLinker.link())
+ return Err;
+
OutStreamer.finish();
- return true;
+ return Error::success();
}
} // end of namespace dwarfutil
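
The DebugInfoLinker hunks above do three things: whitelist the section names llvm-dwarfutil understands, warn about and skip anything else, and change linkDebugInfo to return llvm::Error so the concrete failure reason reaches the caller instead of a generic "possible broken debug info" message. A hedged sketch of the Error-based contract from the calling side (LLVM's Error.h assumed; doLink is a stand-in, not the real function):

#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static Error doLink(bool Ok) { // stand-in for the new linkDebugInfo contract
  if (!Ok)
    return createStringError(std::errc::invalid_argument,
                             "cannot create a stream for the output triple");
  return Error::success();
}

int main() {
  if (Error Err = doLink(false)) {
    // The concrete reason travels with the Error instead of being reduced
    // to a generic message at the call site.
    errs() << "llvm-dwarfutil: " << toString(std::move(Err)) << "\n";
    return 1;
  }
  return 0;
}
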
diff --git a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.h b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.h
index e95c83cb9609..d9d99ffc8747 100644
--- a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.h
+++ b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/DebugInfoLinker.h
@@ -22,8 +22,8 @@ inline bool isDebugSection(StringRef SecName) {
SecName == ".gdb_index";
}
-bool linkDebugInfo(object::ObjectFile &file, const Options &Options,
- raw_pwrite_stream &OutStream);
+Error linkDebugInfo(object::ObjectFile &file, const Options &Options,
+ raw_pwrite_stream &OutStream);
} // end of namespace dwarfutil
} // end of namespace llvm
diff --git a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/llvm-dwarfutil.cpp b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/llvm-dwarfutil.cpp
index e77c82e0fad9..a6466be37513 100644
--- a/contrib/llvm-project/llvm/tools/llvm-dwarfutil/llvm-dwarfutil.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-dwarfutil/llvm-dwarfutil.cpp
@@ -292,7 +292,7 @@ using DebugInfoBits = SmallString<10000>;
static Error addSectionsFromLinkedData(objcopy::ConfigManager &Config,
ObjectFile &InputFile,
DebugInfoBits &LinkedDebugInfoBits) {
- if (dyn_cast<ELFObjectFile<ELF32LE>>(&InputFile)) {
+ if (isa<ELFObjectFile<ELF32LE>>(&InputFile)) {
Expected<ELFObjectFile<ELF32LE>> MemFile = ELFObjectFile<ELF32LE>::create(
MemoryBufferRef(LinkedDebugInfoBits, ""));
if (!MemFile)
@@ -300,7 +300,7 @@ static Error addSectionsFromLinkedData(objcopy::ConfigManager &Config,
if (Error Err = setConfigToAddNewDebugSections(Config, *MemFile))
return Err;
- } else if (dyn_cast<ELFObjectFile<ELF64LE>>(&InputFile)) {
+ } else if (isa<ELFObjectFile<ELF64LE>>(&InputFile)) {
Expected<ELFObjectFile<ELF64LE>> MemFile = ELFObjectFile<ELF64LE>::create(
MemoryBufferRef(LinkedDebugInfoBits, ""));
if (!MemFile)
@@ -308,7 +308,7 @@ static Error addSectionsFromLinkedData(objcopy::ConfigManager &Config,
if (Error Err = setConfigToAddNewDebugSections(Config, *MemFile))
return Err;
- } else if (dyn_cast<ELFObjectFile<ELF32BE>>(&InputFile)) {
+ } else if (isa<ELFObjectFile<ELF32BE>>(&InputFile)) {
Expected<ELFObjectFile<ELF32BE>> MemFile = ELFObjectFile<ELF32BE>::create(
MemoryBufferRef(LinkedDebugInfoBits, ""));
if (!MemFile)
@@ -316,7 +316,7 @@ static Error addSectionsFromLinkedData(objcopy::ConfigManager &Config,
if (Error Err = setConfigToAddNewDebugSections(Config, *MemFile))
return Err;
- } else if (dyn_cast<ELFObjectFile<ELF64BE>>(&InputFile)) {
+ } else if (isa<ELFObjectFile<ELF64BE>>(&InputFile)) {
Expected<ELFObjectFile<ELF64BE>> MemFile = ELFObjectFile<ELF64BE>::create(
MemoryBufferRef(LinkedDebugInfoBits, ""));
if (!MemFile)
@@ -426,16 +426,14 @@ static Error applyCLOptions(const struct Options &Opts, ObjectFile &InputFile) {
DebugInfoBits LinkedDebugInfo;
raw_svector_ostream OutStream(LinkedDebugInfo);
- if (linkDebugInfo(InputFile, Opts, OutStream)) {
- if (Error Err =
- saveLinkedDebugInfo(Opts, InputFile, std::move(LinkedDebugInfo)))
- return Err;
+ if (Error Err = linkDebugInfo(InputFile, Opts, OutStream))
+ return Err;
- return Error::success();
- }
+ if (Error Err =
+ saveLinkedDebugInfo(Opts, InputFile, std::move(LinkedDebugInfo)))
+ return Err;
- return createStringError(std::errc::invalid_argument,
- "possible broken debug info");
+ return Error::success();
} else if (Opts.BuildSeparateDebugFile) {
if (Error Err = splitDebugIntoSeparateFile(Opts, InputFile))
return Err;
diff --git a/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp b/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
index c8266616b73d..64c8c1954ec9 100644
--- a/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-lto/llvm-lto.cpp
@@ -261,6 +261,10 @@ static cl::opt<bool>
cl::desc("Print pass management debugging information"),
cl::cat(LTOCategory));
+static cl::opt<bool>
+ LTOSaveBeforeOpt("lto-save-before-opt", cl::init(false),
+ cl::desc("Save the IR before running optimizations"));
+
namespace {
struct ModuleInfo {
@@ -1069,6 +1073,9 @@ int main(int argc, char **argv) {
CodeGen.setFileType(*FT);
if (!OutputFilename.empty()) {
+ if (LTOSaveBeforeOpt)
+ CodeGen.setSaveIRBeforeOptPath(OutputFilename + ".0.preopt.bc");
+
if (SaveLinkedModuleFile) {
std::string ModuleFilename = OutputFilename;
ModuleFilename += ".linked.bc";
diff --git a/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegionGenerator.cpp b/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegionGenerator.cpp
index cb8e1822ee30..cdecfba9a375 100644
--- a/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegionGenerator.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-mca/CodeRegionGenerator.cpp
@@ -48,8 +48,8 @@ public:
: MCStreamer(Context), Regions(R) {}
// We only want to intercept the emission of new instructions.
- virtual void emitInstruction(const MCInst &Inst,
- const MCSubtargetInfo & /* unused */) override {
+ void emitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo & /* unused */) override {
Regions.addInstruction(Inst);
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
index 8a2b4855501b..7db1e79f3e49 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOptions.cpp
@@ -719,24 +719,15 @@ objcopy::parseObjcopyOptions(ArrayRef<const char *> RawArgsArr,
}
}
- if (auto Arg = InputArgs.getLastArg(OBJCOPY_compress_debug_sections,
- OBJCOPY_compress_debug_sections_eq)) {
- Config.CompressionType = DebugCompressionType::Z;
-
- if (Arg->getOption().getID() == OBJCOPY_compress_debug_sections_eq) {
- Config.CompressionType =
- StringSwitch<DebugCompressionType>(
- InputArgs.getLastArgValue(OBJCOPY_compress_debug_sections_eq))
- .Case("zlib", DebugCompressionType::Z)
- .Default(DebugCompressionType::None);
- if (Config.CompressionType == DebugCompressionType::None)
- return createStringError(
- errc::invalid_argument,
- "invalid or unsupported --compress-debug-sections format: %s",
- InputArgs.getLastArgValue(OBJCOPY_compress_debug_sections_eq)
- .str()
- .c_str());
- }
+ if (const auto *A = InputArgs.getLastArg(OBJCOPY_compress_debug_sections)) {
+ Config.CompressionType = StringSwitch<DebugCompressionType>(A->getValue())
+ .Case("zlib", DebugCompressionType::Z)
+ .Default(DebugCompressionType::None);
+ if (Config.CompressionType == DebugCompressionType::None)
+ return createStringError(
+ errc::invalid_argument,
+ "invalid or unsupported --compress-debug-sections format: %s",
+ A->getValue());
if (!compression::zlib::isAvailable())
return createStringError(
errc::invalid_argument,
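
The ObjcopyOptions hunk above collapses the two spellings of --compress-debug-sections into one joined option whose value goes through a StringSwitch; the .td hunk that follows makes the bare flag an alias for "=zlib". A hedged sketch of the value mapping (only llvm::StringSwitch is assumed real; the enum is a local stand-in for DebugCompressionType):

#include "llvm/ADT/StringSwitch.h"

enum class Compression { None, Zlib }; // local stand-in, not LLVM's enum

static Compression parseCompressDebugSections(llvm::StringRef Value) {
  // "zlib" is currently the only supported format; anything else maps to
  // None, which the caller reports as an invalid-argument error.
  return llvm::StringSwitch<Compression>(Value)
      .Case("zlib", Compression::Zlib)
      .Default(Compression::None);
}
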
diff --git a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
index 962028da47a0..d3713b5ec3c3 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
+++ b/contrib/llvm-project/llvm/tools/llvm-objcopy/ObjcopyOpts.td
@@ -29,12 +29,13 @@ defm new_symbol_visibility : Eq<"new-symbol-visibility", "Visibility of "
" with --add-symbol unless otherwise"
" specified. The default value is 'default'.">;
-def compress_debug_sections : Flag<["--"], "compress-debug-sections">;
-def compress_debug_sections_eq
+def compress_debug_sections
: Joined<["--"], "compress-debug-sections=">,
- MetaVarName<"[ zlib ]">,
- HelpText<"Compress DWARF debug sections using specified style. Supported "
- "formats: 'zlib'">;
+ MetaVarName<"format">,
+ HelpText<"Compress DWARF debug sections using specified format. Supported "
+ "formats: zlib">;
+def : Flag<["--"], "compress-debug-sections">, Alias<compress_debug_sections>,
+ AliasArgs<["zlib"]>;
def decompress_debug_sections : Flag<["--"], "decompress-debug-sections">,
HelpText<"Decompress DWARF debug sections.">;
defm split_dwo

diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 9e4fa7c0d9dd..fd83dc197fe9 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -466,6 +466,15 @@ static void printRelocation(formatted_raw_ostream &OS, StringRef FileName,
OS << format(Fmt.data(), Address) << Name << "\t" << Val;
}
+static void AlignToInstStartColumn(size_t Start, const MCSubtargetInfo &STI,
+ raw_ostream &OS) {
+ // The output of printInst starts with a tab. Print some spaces so that
+ // the tab has 1 column and advances to the target tab stop.
+ unsigned TabStop = getInstStartColumn(STI);
+ unsigned Column = OS.tell() - Start;
+ OS.indent(Column < TabStop - 1 ? TabStop - 1 - Column : 7 - Column % 8);
+}
+
class PrettyPrinter {
public:
virtual ~PrettyPrinter() = default;
@@ -487,11 +496,7 @@ public:
dumpBytes(Bytes, OS);
}
- // The output of printInst starts with a tab. Print some spaces so that
- // the tab has 1 column and advances to the target tab stop.
- unsigned TabStop = getInstStartColumn(STI);
- unsigned Column = OS.tell() - Start;
- OS.indent(Column < TabStop - 1 ? TabStop - 1 - Column : 7 - Column % 8);
+ AlignToInstStartColumn(Start, STI, OS);
if (MI) {
// See MCInstPrinter::printInst. On targets where a PC relative immediate
@@ -664,6 +669,91 @@ public:
};
BPFPrettyPrinter BPFPrettyPrinterInst;
+class ARMPrettyPrinter : public PrettyPrinter {
+public:
+ void printInst(MCInstPrinter &IP, const MCInst *MI, ArrayRef<uint8_t> Bytes,
+ object::SectionedAddress Address, formatted_raw_ostream &OS,
+ StringRef Annot, MCSubtargetInfo const &STI, SourcePrinter *SP,
+ StringRef ObjectFilename, std::vector<RelocationRef> *Rels,
+ LiveVariablePrinter &LVP) override {
+ if (SP && (PrintSource || PrintLines))
+ SP->printSourceLine(OS, Address, ObjectFilename, LVP);
+ LVP.printBetweenInsts(OS, false);
+
+ size_t Start = OS.tell();
+ if (LeadingAddr)
+ OS << format("%8" PRIx64 ":", Address.Address);
+ if (ShowRawInsn) {
+ size_t Pos = 0, End = Bytes.size();
+ if (STI.checkFeatures("+thumb-mode")) {
+ for (; Pos + 2 <= End; Pos += 2)
+ OS << ' '
+ << format_hex_no_prefix(
+ llvm::support::endian::read<uint16_t>(
+ Bytes.data() + Pos, llvm::support::little),
+ 4);
+ } else {
+ for (; Pos + 4 <= End; Pos += 4)
+ OS << ' '
+ << format_hex_no_prefix(
+ llvm::support::endian::read<uint32_t>(
+ Bytes.data() + Pos, llvm::support::little),
+ 8);
+ }
+ if (Pos < End) {
+ OS << ' ';
+ dumpBytes(Bytes.slice(Pos), OS);
+ }
+ }
+
+ AlignToInstStartColumn(Start, STI, OS);
+
+ if (MI) {
+ IP.printInst(MI, Address.Address, "", STI, OS);
+ } else
+ OS << "\t<unknown>";
+ }
+};
+ARMPrettyPrinter ARMPrettyPrinterInst;
+
+class AArch64PrettyPrinter : public PrettyPrinter {
+public:
+ void printInst(MCInstPrinter &IP, const MCInst *MI, ArrayRef<uint8_t> Bytes,
+ object::SectionedAddress Address, formatted_raw_ostream &OS,
+ StringRef Annot, MCSubtargetInfo const &STI, SourcePrinter *SP,
+ StringRef ObjectFilename, std::vector<RelocationRef> *Rels,
+ LiveVariablePrinter &LVP) override {
+ if (SP && (PrintSource || PrintLines))
+ SP->printSourceLine(OS, Address, ObjectFilename, LVP);
+ LVP.printBetweenInsts(OS, false);
+
+ size_t Start = OS.tell();
+ if (LeadingAddr)
+ OS << format("%8" PRIx64 ":", Address.Address);
+ if (ShowRawInsn) {
+ size_t Pos = 0, End = Bytes.size();
+ for (; Pos + 4 <= End; Pos += 4)
+ OS << ' '
+ << format_hex_no_prefix(
+ llvm::support::endian::read<uint32_t>(Bytes.data() + Pos,
+ llvm::support::little),
+ 8);
+ if (Pos < End) {
+ OS << ' ';
+ dumpBytes(Bytes.slice(Pos), OS);
+ }
+ }
+
+ AlignToInstStartColumn(Start, STI, OS);
+
+ if (MI) {
+ IP.printInst(MI, Address.Address, "", STI, OS);
+ } else
+ OS << "\t<unknown>";
+ }
+};
+AArch64PrettyPrinter AArch64PrettyPrinterInst;
+
PrettyPrinter &selectPrettyPrinter(Triple const &Triple) {
switch(Triple.getArch()) {
default:
@@ -675,6 +765,15 @@ PrettyPrinter &selectPrettyPrinter(Triple const &Triple) {
case Triple::bpfel:
case Triple::bpfeb:
return BPFPrettyPrinterInst;
+ case Triple::arm:
+ case Triple::armeb:
+ case Triple::thumb:
+ case Triple::thumbeb:
+ return ARMPrettyPrinterInst;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ case Triple::aarch64_32:
+ return AArch64PrettyPrinterInst;
}
}
}
@@ -895,12 +994,14 @@ static uint64_t dumpARMELFData(uint64_t SectionAddr, uint64_t Index,
uint64_t End, const ObjectFile &Obj,
ArrayRef<uint8_t> Bytes,
ArrayRef<MappingSymbolPair> MappingSymbols,
- raw_ostream &OS) {
+ const MCSubtargetInfo &STI, raw_ostream &OS) {
support::endianness Endian =
Obj.isLittleEndian() ? support::little : support::big;
- OS << format("%8" PRIx64 ":\t", SectionAddr + Index);
+ size_t Start = OS.tell();
+ OS << format("%8" PRIx64 ": ", SectionAddr + Index);
if (Index + 4 <= End) {
dumpBytes(Bytes.slice(Index, 4), OS);
+ AlignToInstStartColumn(Start, STI, OS);
OS << "\t.word\t"
<< format_hex(support::endian::read32(Bytes.data() + Index, Endian),
10);
@@ -908,13 +1009,14 @@ static uint64_t dumpARMELFData(uint64_t SectionAddr, uint64_t Index,
}
if (Index + 2 <= End) {
dumpBytes(Bytes.slice(Index, 2), OS);
- OS << "\t\t.short\t"
- << format_hex(support::endian::read16(Bytes.data() + Index, Endian),
- 6);
+ AlignToInstStartColumn(Start, STI, OS);
+ OS << "\t.short\t"
+ << format_hex(support::endian::read16(Bytes.data() + Index, Endian), 6);
return 2;
}
dumpBytes(Bytes.slice(Index, 1), OS);
- OS << "\t\t.byte\t" << format_hex(Bytes[0], 4);
+ AlignToInstStartColumn(Start, STI, OS);
+ OS << "\t.byte\t" << format_hex(Bytes[Index], 4);
return 1;
}
@@ -1022,10 +1124,12 @@ static void collectLocalBranchTargets(
// Disassemble a real instruction and record function-local branch labels.
MCInst Inst;
uint64_t Size;
- bool Disassembled = DisAsm->getInstruction(
- Inst, Size, Bytes.slice(Index - SectionAddr), Index, nulls());
+ ArrayRef<uint8_t> ThisBytes = Bytes.slice(Index - SectionAddr);
+ bool Disassembled =
+ DisAsm->getInstruction(Inst, Size, ThisBytes, Index, nulls());
if (Size == 0)
- Size = 1;
+ Size = std::min<uint64_t>(ThisBytes.size(),
+ DisAsm->suggestBytesToSkip(ThisBytes, Index));
if (Disassembled && MIA) {
uint64_t Target;
@@ -1068,10 +1172,11 @@ static void addSymbolizer(
for (size_t Index = 0; Index != Bytes.size();) {
MCInst Inst;
uint64_t Size;
- DisAsm->getInstruction(Inst, Size, Bytes.slice(Index), SectionAddr + Index,
- nulls());
+ ArrayRef<uint8_t> ThisBytes = Bytes.slice(Index - SectionAddr);
+ DisAsm->getInstruction(Inst, Size, ThisBytes, Index, nulls());
if (Size == 0)
- Size = 1;
+ Size = std::min<uint64_t>(ThisBytes.size(),
+ DisAsm->suggestBytesToSkip(ThisBytes, Index));
Index += Size;
}
ArrayRef<uint64_t> LabelAddrsRef = SymbolizerPtr->getReferencedAddresses();
@@ -1504,7 +1609,7 @@ static void disassembleObject(const Target *TheTarget, ObjectFile &Obj,
if (DumpARMELFData) {
Size = dumpARMELFData(SectionAddr, Index, End, Obj, Bytes,
- MappingSymbols, FOS);
+ MappingSymbols, *STI, FOS);
} else {
            // When -z or --disassemble-zeroes are given we always disassemble
// them. Otherwise we might want to skip zero bytes we see.
@@ -1538,11 +1643,14 @@ static void disassembleObject(const Target *TheTarget, ObjectFile &Obj,
// Disassemble a real instruction or a data when disassemble all is
// provided
MCInst Inst;
- bool Disassembled =
- DisAsm->getInstruction(Inst, Size, Bytes.slice(Index),
- SectionAddr + Index, CommentStream);
+ ArrayRef<uint8_t> ThisBytes = Bytes.slice(Index);
+ uint64_t ThisAddr = SectionAddr + Index;
+ bool Disassembled = DisAsm->getInstruction(Inst, Size, ThisBytes,
+ ThisAddr, CommentStream);
if (Size == 0)
- Size = 1;
+ Size = std::min<uint64_t>(
+ ThisBytes.size(),
+ DisAsm->suggestBytesToSkip(ThisBytes, ThisAddr));
LVP.update({Index, Section.getIndex()},
{Index + Size, Section.getIndex()}, Index + Size != End);
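
The llvm-objdump hunks above add ARM and AArch64 pretty printers that group raw encoding bytes into 16-bit units for Thumb and 32-bit units otherwise, share the new AlignToInstStartColumn helper, and use suggestBytesToSkip instead of blindly advancing one byte past undecodable input. A standalone sketch of just the byte-grouping idea, with no LLVM types:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <vector>

// Thumb encodings print as 16-bit groups, ARM/AArch64 as 32-bit groups, both
// read little-endian; leftover bytes are dumped one at a time.
static void printRawInsn(const std::vector<uint8_t> &Bytes, bool Thumb) {
  size_t Pos = 0, End = Bytes.size();
  size_t Unit = Thumb ? 2 : 4;
  for (; Pos + Unit <= End; Pos += Unit) {
    uint32_t Word = 0;
    for (size_t I = 0; I < Unit; ++I)
      Word |= uint32_t(Bytes[Pos + I]) << (8 * I);
    std::printf(" %0*" PRIx32, int(Unit * 2), Word); // 4 or 8 hex digits
  }
  for (; Pos < End; ++Pos)
    std::printf(" %02x", unsigned(Bytes[Pos]));
  std::printf("\n");
}
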
diff --git a/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp b/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 0c23d7c1435f..3af8f800adcb 100644
--- a/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -2200,8 +2200,7 @@ static int showInstrProfile(const std::string &Filename, bool ShowCounts,
Builder.addRecord(Func);
if (ShowCovered) {
- if (std::any_of(Func.Counts.begin(), Func.Counts.end(),
- [](uint64_t C) { return C; }))
+ if (llvm::any_of(Func.Counts, [](uint64_t C) { return C; }))
OS << Func.Name << "\n";
continue;
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
index ae2dec5d15fb..ba7bae96ade3 100644
--- a/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -1648,6 +1648,15 @@ const EnumEntry<unsigned> ElfHeaderAVRFlags[] = {
ENUM_ENT(EF_AVR_LINKRELAX_PREPARED, "relaxable"),
};
+const EnumEntry<unsigned> ElfHeaderLoongArchFlags[] = {
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_ILP32S, "ILP32, SOFT-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_ILP32F, "ILP32, SINGLE-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_ILP32D, "ILP32, DOUBLE-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_LP64S, "LP64, SOFT-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_LP64F, "LP64, SINGLE-FLOAT"),
+ ENUM_ENT(EF_LOONGARCH_BASE_ABI_LP64D, "LP64, DOUBLE-FLOAT"),
+};
+
const EnumEntry<unsigned> ElfSymOtherFlags[] = {
LLVM_READOBJ_ENUM_ENT(ELF, STV_INTERNAL),
@@ -3357,6 +3366,9 @@ template <class ELFT> void GNUELFDumper<ELFT>::printFileHeaders() {
else if (e.e_machine == EM_AVR)
ElfFlags = printFlags(e.e_flags, makeArrayRef(ElfHeaderAVRFlags),
unsigned(ELF::EF_AVR_ARCH_MASK));
+ else if (e.e_machine == EM_LOONGARCH)
+ ElfFlags = printFlags(e.e_flags, makeArrayRef(ElfHeaderLoongArchFlags),
+ unsigned(ELF::EF_LOONGARCH_BASE_ABI_MASK));
Str = "0x" + utohexstr(e.e_flags);
if (!ElfFlags.empty())
Str = Str + ", " + ElfFlags;
@@ -6507,6 +6519,9 @@ template <class ELFT> void LLVMELFDumper<ELFT>::printFileHeaders() {
else if (E.e_machine == EM_AVR)
W.printFlags("Flags", E.e_flags, makeArrayRef(ElfHeaderAVRFlags),
unsigned(ELF::EF_AVR_ARCH_MASK));
+ else if (E.e_machine == EM_LOONGARCH)
+ W.printFlags("Flags", E.e_flags, makeArrayRef(ElfHeaderLoongArchFlags),
+ unsigned(ELF::EF_LOONGARCH_BASE_ABI_MASK));
else
W.printFlags("Flags", E.e_flags);
W.printNumber("HeaderSize", E.e_ehsize);
diff --git a/contrib/llvm-project/llvm/tools/llvm-xray/xray-graph.cpp b/contrib/llvm-project/llvm/tools/llvm-xray/xray-graph.cpp
index 39d2c5c153ef..ff47eb64e947 100644
--- a/contrib/llvm-project/llvm/tools/llvm-xray/xray-graph.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-xray/xray-graph.cpp
@@ -232,10 +232,11 @@ Error GraphRenderer::accountRecord(const XRayRecord &Record) {
if (!DeduceSiblingCalls)
return make_error<StringError>("No matching ENTRY record",
make_error_code(errc::invalid_argument));
- auto Parent = std::find_if(
- ThreadStack.rbegin(), ThreadStack.rend(),
- [&](const FunctionAttr &A) { return A.FuncId == Record.FuncId; });
- if (Parent == ThreadStack.rend())
+ bool FoundParent =
+ llvm::any_of(llvm::reverse(ThreadStack), [&](const FunctionAttr &A) {
+ return A.FuncId == Record.FuncId;
+ });
+ if (!FoundParent)
return make_error<StringError>(
"No matching Entry record in stack",
make_error_code(errc::invalid_argument)); // There is no matching
diff --git a/contrib/llvm-project/llvm/utils/TableGen/DXILEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/DXILEmitter.cpp
index b9c563c62bbe..cd41fbaa6ca1 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/DXILEmitter.cpp
@@ -16,10 +16,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/DXILOperationCommon.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
using namespace llvm;
+using namespace llvm::DXIL;
namespace {
@@ -27,25 +29,16 @@ struct DXILShaderModel {
int Major;
int Minor;
};
+
struct DXILParam {
- int Pos; // position in parameter list
- StringRef Type; // llvm type name, $o for overload, $r for resource
- // type, $cb for legacy cbuffer, $u4 for u4 struct
+ int Pos; // position in parameter list
+ ParameterKind Kind;
StringRef Name; // short, unique name
StringRef Doc; // the documentation description of this parameter
bool IsConst; // whether this argument requires a constant value in the IR
StringRef EnumName; // the name of the enum type if applicable
int MaxValue; // the maximum value for this parameter if applicable
- DXILParam(const Record *R) {
- Name = R->getValueAsString("name");
- Pos = R->getValueAsInt("pos");
- Type = R->getValueAsString("llvm_type");
- if (R->getValue("doc"))
- Doc = R->getValueAsString("doc");
- IsConst = R->getValueAsBit("is_const");
- EnumName = R->getValueAsString("enum_name");
- MaxValue = R->getValueAsInt("max_value");
- }
+ DXILParam(const Record *R);
};
struct DXILOperationData {
@@ -74,7 +67,9 @@ struct DXILOperationData {
DXILShaderModel ShaderModel; // minimum shader model required
DXILShaderModel ShaderModelTranslated; // minimum shader model required with
// translation by linker
- SmallVector<StringRef, 4> counters; // counters for this inst.
+  int OverloadParamIndex; // parameter index which controls the overload.
+ // When < 0, should be only 1 overload type.
+ SmallVector<StringRef, 4> counters; // counters for this inst.
DXILOperationData(const Record *R) {
Name = R->getValueAsString("name");
DXILOp = R->getValueAsString("dxil_op");
@@ -93,9 +88,13 @@ struct DXILOperationData {
Doc = R->getValueAsString("doc");
ListInit *ParamList = R->getValueAsListInit("ops");
- for (unsigned i = 0; i < ParamList->size(); ++i) {
- Record *Param = ParamList->getElementAsRecord(i);
+ OverloadParamIndex = -1;
+ for (unsigned I = 0; I < ParamList->size(); ++I) {
+ Record *Param = ParamList->getElementAsRecord(I);
Params.emplace_back(DXILParam(Param));
+ auto &CurParam = Params.back();
+ if (CurParam.Kind >= ParameterKind::OVERLOAD)
+ OverloadParamIndex = I;
}
OverloadTypes = R->getValueAsString("oload_types");
FnAttr = R->getValueAsString("fn_attr");
@@ -103,6 +102,51 @@ struct DXILOperationData {
};
} // end anonymous namespace
+DXILParam::DXILParam(const Record *R) {
+ Name = R->getValueAsString("name");
+ Pos = R->getValueAsInt("pos");
+ Kind = parameterTypeNameToKind(R->getValueAsString("llvm_type"));
+ if (R->getValue("doc"))
+ Doc = R->getValueAsString("doc");
+ IsConst = R->getValueAsBit("is_const");
+ EnumName = R->getValueAsString("enum_name");
+ MaxValue = R->getValueAsInt("max_value");
+}
+
+static std::string parameterKindToString(ParameterKind Kind) {
+ switch (Kind) {
+ case ParameterKind::INVALID:
+ return "INVALID";
+ case ParameterKind::VOID:
+ return "VOID";
+ case ParameterKind::HALF:
+ return "HALF";
+ case ParameterKind::FLOAT:
+ return "FLOAT";
+ case ParameterKind::DOUBLE:
+ return "DOUBLE";
+ case ParameterKind::I1:
+ return "I1";
+ case ParameterKind::I8:
+ return "I8";
+ case ParameterKind::I16:
+ return "I16";
+ case ParameterKind::I32:
+ return "I32";
+ case ParameterKind::I64:
+ return "I64";
+ case ParameterKind::OVERLOAD:
+ return "OVERLOAD";
+ case ParameterKind::CBUFFER_RET:
+ return "CBUFFER_RET";
+ case ParameterKind::RESOURCE_RET:
+ return "RESOURCE_RET";
+ case ParameterKind::DXIL_HANDLE:
+ return "DXIL_HANDLE";
+ }
+ llvm_unreachable("Unknown llvm::DXIL::ParameterKind enum");
+}
+
static void emitDXILOpEnum(DXILOperationData &DXILOp, raw_ostream &OS) {
// Name = ID, // Doc
OS << DXILOp.Name << " = " << DXILOp.DXILOpID << ", // " << DXILOp.Doc
@@ -271,7 +315,9 @@ static void emitDXILOperationTable(std::vector<DXILOperationData> &DXILOps,
// Collect Names.
SequenceToOffsetTable<std::string> OpClassStrings;
SequenceToOffsetTable<std::string> OpStrings;
+ SequenceToOffsetTable<SmallVector<ParameterKind>> Parameters;
+ StringMap<SmallVector<ParameterKind>> ParameterMap;
StringSet<> ClassSet;
for (auto &DXILOp : DXILOps) {
OpStrings.add(DXILOp.DXILOp.str());
@@ -280,16 +326,24 @@ static void emitDXILOperationTable(std::vector<DXILOperationData> &DXILOps,
continue;
ClassSet.insert(DXILOp.DXILClass);
OpClassStrings.add(getDXILOpClassName(DXILOp.DXILClass));
+ SmallVector<ParameterKind> ParamKindVec;
+ for (auto &Param : DXILOp.Params) {
+ ParamKindVec.emplace_back(Param.Kind);
+ }
+ ParameterMap[DXILOp.DXILClass] = ParamKindVec;
+ Parameters.add(ParamKindVec);
}
// Layout names.
OpStrings.layout();
OpClassStrings.layout();
+ Parameters.layout();
// Emit the DXIL operation table.
//{DXIL::OpCode::Sin, OpCodeNameIndex, OpCodeClass::Unary,
// OpCodeClassNameIndex,
- // OverloadKind::FLOAT | OverloadKind::HALF, Attribute::AttrKind::ReadNone},
+ // OverloadKind::FLOAT | OverloadKind::HALF, Attribute::AttrKind::ReadNone, 0,
+ // 3, ParameterTableOffset},
OS << "static const OpCodeProperty *getOpCodeProperty(DXIL::OpCode DXILOp) "
"{\n";
@@ -300,7 +354,9 @@ static void emitDXILOperationTable(std::vector<DXILOperationData> &DXILOps,
<< ", OpCodeClass::" << DXILOp.DXILClass << ", "
<< OpClassStrings.get(getDXILOpClassName(DXILOp.DXILClass)) << ", "
<< getDXILOperationOverload(DXILOp.OverloadTypes) << ", "
- << emitDXILOperationFnAttr(DXILOp.FnAttr) << " },\n";
+ << emitDXILOperationFnAttr(DXILOp.FnAttr) << ", "
+ << DXILOp.OverloadParamIndex << ", " << DXILOp.Params.size() << ", "
+ << Parameters.get(ParameterMap[DXILOp.DXILClass]) << " },\n";
}
OS << " };\n";
@@ -338,6 +394,21 @@ static void emitDXILOperationTable(std::vector<DXILOperationData> &DXILOps,
OS << " unsigned Index = Prop.OpCodeClassNameOffset;\n";
OS << " return DXILOpCodeClassNameTable + Index;\n";
OS << "}\n ";
+
+ OS << "static const ParameterKind *getOpCodeParameterKind(const "
+ "OpCodeProperty &Prop) "
+ "{\n\n";
+ OS << " static const ParameterKind DXILOpParameterKindTable[] = {\n";
+ Parameters.emit(
+ OS,
+ [](raw_ostream &ParamOS, ParameterKind Kind) {
+ ParamOS << "ParameterKind::" << parameterKindToString(Kind);
+ },
+ "ParameterKind::INVALID");
+ OS << " };\n\n";
+ OS << " unsigned Index = Prop.ParameterTableOffset;\n";
+ OS << " return DXILOpParameterKindTable + Index;\n";
+ OS << "}\n ";
}
namespace llvm {
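
The DXILEmitter hunks above record each operation's parameter kinds and emit them through a SequenceToOffsetTable: every operation row stores an offset and length into one shared flat array of ParameterKind values. A standalone sketch of that layout, without the suffix sharing SequenceToOffsetTable performs and with invented names:

#include <vector>

enum class ParamKind { Overload, I32, Handle }; // invented subset

struct OpRow {
  unsigned NumParams;
  unsigned ParamTableOffset; // index into the shared Kinds array
};

struct OpTable {
  std::vector<ParamKind> Kinds; // concatenation of every operation's kinds
  std::vector<OpRow> Rows;

  unsigned addOp(const std::vector<ParamKind> &Params) {
    unsigned Off = unsigned(Kinds.size());
    Kinds.insert(Kinds.end(), Params.begin(), Params.end());
    Rows.push_back({unsigned(Params.size()), Off});
    return unsigned(Rows.size() - 1);
  }

  // Analogue of the generated getOpCodeParameterKind: a pointer into the
  // flat table at the row's recorded offset.
  const ParamKind *paramsOf(unsigned Op) const {
    return Kinds.data() + Rows[Op].ParamTableOffset;
  }
};
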
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicate.h b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicate.h
index 08e541b76a5a..96fef21b7627 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicate.h
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISel/GIMatchDagPredicate.h
@@ -96,7 +96,7 @@ public:
void printDescription(raw_ostream &OS) const override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- virtual LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
};
@@ -119,7 +119,7 @@ public:
void printDescription(raw_ostream &OS) const override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- virtual LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
};
@@ -134,7 +134,7 @@ public:
void printDescription(raw_ostream &OS) const override;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- virtual LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
};
diff --git a/lib/clang/include/VCSVersion.inc b/lib/clang/include/VCSVersion.inc
index 28ec17e11314..571ed75d3dc9 100644
--- a/lib/clang/include/VCSVersion.inc
+++ b/lib/clang/include/VCSVersion.inc
@@ -1,10 +1,10 @@
// $FreeBSD$
-#define LLVM_REVISION "llvmorg-15-init-17485-ga3e38b4a206b"
+#define LLVM_REVISION "llvmorg-15-init-17826-g1f8ae9d7e7e4"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"
-#define CLANG_REVISION "llvmorg-15-init-17485-ga3e38b4a206b"
+#define CLANG_REVISION "llvmorg-15-init-17826-g1f8ae9d7e7e4"
#define CLANG_REPOSITORY "https://github.com/llvm/llvm-project.git"
-#define LLDB_REVISION "llvmorg-15-init-17485-ga3e38b4a206b"
+#define LLDB_REVISION "llvmorg-15-init-17826-g1f8ae9d7e7e4"
#define LLDB_REPOSITORY "https://github.com/llvm/llvm-project.git"
diff --git a/lib/clang/include/clang/Config/config.h b/lib/clang/include/clang/Config/config.h
index 49a883a045a5..6c3208239419 100644
--- a/lib/clang/include/clang/Config/config.h
+++ b/lib/clang/include/clang/Config/config.h
@@ -80,7 +80,7 @@
#define CLANG_HAVE_RLIMITS 1
/* The LLVM product name and version */
-#define BACKEND_PACKAGE_STRING "LLVM 15.0.0git"
+#define BACKEND_PACKAGE_STRING "LLVM 15.0.0"
/* Linker version detected at compile time. */
/* #undef HOST_LINK_VERSION */
diff --git a/lib/clang/include/lld/Common/Version.inc b/lib/clang/include/lld/Common/Version.inc
index 5290875aeba6..6e25b65b845e 100644
--- a/lib/clang/include/lld/Common/Version.inc
+++ b/lib/clang/include/lld/Common/Version.inc
@@ -1,4 +1,4 @@
// Local identifier in __FreeBSD_version style
#define LLD_FREEBSD_VERSION 1400005
-#define LLD_VERSION_STRING "15.0.0 (FreeBSD llvmorg-15-init-17485-ga3e38b4a206b-" __XSTRING(LLD_FREEBSD_VERSION) ")"
+#define LLD_VERSION_STRING "15.0.0 (FreeBSD llvmorg-15-init-17826-g1f8ae9d7e7e4-" __XSTRING(LLD_FREEBSD_VERSION) ")"
diff --git a/lib/clang/include/lldb/Version/Version.inc b/lib/clang/include/lldb/Version/Version.inc
index f2f189e58e28..919499091d2a 100644
--- a/lib/clang/include/lldb/Version/Version.inc
+++ b/lib/clang/include/lldb/Version/Version.inc
@@ -1,5 +1,5 @@
-#define LLDB_VERSION 15.0.0git
-#define LLDB_VERSION_STRING "15.0.0git"
+#define LLDB_VERSION 15.0.0
+#define LLDB_VERSION_STRING "15.0.0"
#define LLDB_VERSION_MAJOR 15
#define LLDB_VERSION_MINOR 0
#define LLDB_VERSION_PATCH 0
diff --git a/lib/clang/include/llvm/Config/config.h b/lib/clang/include/llvm/Config/config.h
index 7b98178f61ac..4834ac961e39 100644
--- a/lib/clang/include/llvm/Config/config.h
+++ b/lib/clang/include/llvm/Config/config.h
@@ -353,10 +353,10 @@
#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "LLVM 15.0.0git"
+#define PACKAGE_STRING "LLVM 15.0.0"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "15.0.0git"
+#define PACKAGE_VERSION "15.0.0"
/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */
diff --git a/lib/clang/include/llvm/Config/llvm-config.h b/lib/clang/include/llvm/Config/llvm-config.h
index fcc0e63063e2..e0f50991ec6d 100644
--- a/lib/clang/include/llvm/Config/llvm-config.h
+++ b/lib/clang/include/llvm/Config/llvm-config.h
@@ -77,7 +77,7 @@
#define LLVM_VERSION_PATCH 0
/* LLVM version string */
-#define LLVM_VERSION_STRING "15.0.0git"
+#define LLVM_VERSION_STRING "15.0.0"
/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index 981b9a094d58..1405a0d08395 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,3 +1,3 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "llvmorg-15-init-17485-ga3e38b4a206b"
+#define LLVM_REVISION "llvmorg-15-init-17826-g1f8ae9d7e7e4"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"
diff --git a/lib/clang/libclang/Makefile b/lib/clang/libclang/Makefile
index 7be8a7442dc1..7181da12a065 100644
--- a/lib/clang/libclang/Makefile
+++ b/lib/clang/libclang/Makefile
@@ -525,6 +525,7 @@ SRCS_MIN+= Sema/SemaObjCProperty.cpp
SRCS_MIN+= Sema/SemaOpenMP.cpp
SRCS_MIN+= Sema/SemaOverload.cpp
SRCS_MIN+= Sema/SemaPseudoObject.cpp
+SRCS_MIN+= Sema/SemaRISCVVectorLookup.cpp
SRCS_MIN+= Sema/SemaSYCL.cpp
SRCS_MIN+= Sema/SemaStmt.cpp
SRCS_MIN+= Sema/SemaStmtAsm.cpp
@@ -733,6 +734,7 @@ SRCS_FUL+= StaticAnalyzer/Frontend/CreateCheckerManager.cpp
SRCS_FUL+= StaticAnalyzer/Frontend/FrontendActions.cpp
SRCS_FUL+= StaticAnalyzer/Frontend/ModelConsumer.cpp
SRCS_FUL+= StaticAnalyzer/Frontend/ModelInjector.cpp
+SRCS_MIN+= Support/RISCVVIntrinsicUtils.cpp
SRCS_MIN+= Tooling/ArgumentsAdjusters.cpp
SRCS_MIN+= Tooling/CommonOptionsParser.cpp
SRCS_MIN+= Tooling/CompilationDatabase.cpp
@@ -1028,6 +1030,13 @@ clang/Basic/riscv_vector_builtin_cg.inc: \
-o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/riscv_vector.td
TGHDRS+= clang/Basic/riscv_vector_builtin_cg.inc
+clang/Basic/riscv_vector_builtin_sema.inc: \
+ ${CLANG_SRCS}/include/clang/Basic/riscv_vector.td
+ ${CLANG_TBLGEN} -gen-riscv-vector-builtin-sema \
+ -I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/$/.d/} \
+ -o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/riscv_vector.td
+TGHDRS+= clang/Basic/riscv_vector_builtin_sema.inc
+
clang/Driver/Options.inc: ${CLANG_SRCS}/include/clang/Driver/Options.td
${LLVM_TBLGEN} -gen-opt-parser-defs \
-I ${LLVM_SRCS}/include -I ${CLANG_SRCS}/include/clang/Driver \
diff --git a/lib/clang/liblldb/LLDBWrapLua.cpp b/lib/clang/liblldb/LLDBWrapLua.cpp
index 061eb327c437..c6c778cd6955 100644
--- a/lib/clang/liblldb/LLDBWrapLua.cpp
+++ b/lib/clang/liblldb/LLDBWrapLua.cpp
@@ -72736,10 +72736,10 @@ static swig_lua_const_info swig_SwigModule_constants[]= {
{SWIG_LUA_CONSTTAB_INT("eSaveCoreFull", lldb::eSaveCoreFull)},
{SWIG_LUA_CONSTTAB_INT("eSaveCoreDirtyOnly", lldb::eSaveCoreDirtyOnly)},
{SWIG_LUA_CONSTTAB_INT("eSaveCoreStackOnly", lldb::eSaveCoreStackOnly)},
- {SWIG_LUA_CONSTTAB_INT("eTraceCounterTSC", lldb::eTraceCounterTSC)},
{SWIG_LUA_CONSTTAB_INT("eTraceEventDisabledSW", lldb::eTraceEventDisabledSW)},
{SWIG_LUA_CONSTTAB_INT("eTraceEventDisabledHW", lldb::eTraceEventDisabledHW)},
{SWIG_LUA_CONSTTAB_INT("eTraceEventCPUChanged", lldb::eTraceEventCPUChanged)},
+ {SWIG_LUA_CONSTTAB_INT("eTraceEventHWClockTick", lldb::eTraceEventHWClockTick)},
{SWIG_LUA_CONSTTAB_INT("eTraceItemKindError", lldb::eTraceItemKindError)},
{SWIG_LUA_CONSTTAB_INT("eTraceItemKindEvent", lldb::eTraceItemKindEvent)},
{SWIG_LUA_CONSTTAB_INT("eTraceItemKindInstruction", lldb::eTraceItemKindInstruction)},
diff --git a/lib/libc++/Makefile b/lib/libc++/Makefile
index 39e099dccafa..9d059ea39319 100644
--- a/lib/libc++/Makefile
+++ b/lib/libc++/Makefile
@@ -713,6 +713,7 @@ MEM_HEADERS+= ranges_construct_at.h
MEM_HEADERS+= ranges_uninitialized_algorithms.h
MEM_HEADERS+= raw_storage_iterator.h
MEM_HEADERS+= shared_ptr.h
+MEM_HEADERS+= swap_allocator.h
MEM_HEADERS+= temporary_buffer.h
MEM_HEADERS+= uninitialized_algorithms.h
MEM_HEADERS+= unique_ptr.h
@@ -846,10 +847,16 @@ TTR_HEADERS+= add_lvalue_reference.h
TTR_HEADERS+= add_pointer.h
TTR_HEADERS+= add_rvalue_reference.h
TTR_HEADERS+= add_volatile.h
+TTR_HEADERS+= aligned_storage.h
+TTR_HEADERS+= aligned_union.h
TTR_HEADERS+= alignment_of.h
TTR_HEADERS+= apply_cv.h
+TTR_HEADERS+= common_reference.h
+TTR_HEADERS+= common_type.h
TTR_HEADERS+= conditional.h
TTR_HEADERS+= conjunction.h
+TTR_HEADERS+= copy_cv.h
+TTR_HEADERS+= copy_cvref.h
TTR_HEADERS+= decay.h
TTR_HEADERS+= disjunction.h
TTR_HEADERS+= enable_if.h
@@ -891,6 +898,7 @@ TTR_HEADERS+= is_move_assignable.h
TTR_HEADERS+= is_move_constructible.h
TTR_HEADERS+= is_nothrow_assignable.h
TTR_HEADERS+= is_nothrow_constructible.h
+TTR_HEADERS+= is_nothrow_convertible.h
TTR_HEADERS+= is_nothrow_copy_assignable.h
TTR_HEADERS+= is_nothrow_copy_constructible.h
TTR_HEADERS+= is_nothrow_default_constructible.h
@@ -902,6 +910,7 @@ TTR_HEADERS+= is_object.h
TTR_HEADERS+= is_pod.h
TTR_HEADERS+= is_pointer.h
TTR_HEADERS+= is_polymorphic.h
+TTR_HEADERS+= is_primary_template.h
TTR_HEADERS+= is_reference.h
TTR_HEADERS+= is_reference_wrapper.h
TTR_HEADERS+= is_referenceable.h
@@ -909,6 +918,7 @@ TTR_HEADERS+= is_same.h
TTR_HEADERS+= is_scalar.h
TTR_HEADERS+= is_scoped_enum.h
TTR_HEADERS+= is_signed.h
+TTR_HEADERS+= is_signed_integer.h
TTR_HEADERS+= is_standard_layout.h
TTR_HEADERS+= is_trivial.h
TTR_HEADERS+= is_trivially_assignable.h
@@ -923,18 +933,28 @@ TTR_HEADERS+= is_trivially_move_constructible.h
TTR_HEADERS+= is_unbounded_array.h
TTR_HEADERS+= is_union.h
TTR_HEADERS+= is_unsigned.h
+TTR_HEADERS+= is_unsigned_integer.h
+TTR_HEADERS+= is_valid_expansion.h
TTR_HEADERS+= is_void.h
TTR_HEADERS+= is_volatile.h
+TTR_HEADERS+= lazy.h
+TTR_HEADERS+= make_32_64_or_128_bit.h
+TTR_HEADERS+= make_signed.h
+TTR_HEADERS+= make_unsigned.h
+TTR_HEADERS+= nat.h
TTR_HEADERS+= negation.h
+TTR_HEADERS+= promote.h
TTR_HEADERS+= rank.h
TTR_HEADERS+= remove_all_extents.h
TTR_HEADERS+= remove_const.h
TTR_HEADERS+= remove_cv.h
+TTR_HEADERS+= remove_cvref.h
TTR_HEADERS+= remove_extent.h
TTR_HEADERS+= remove_pointer.h
TTR_HEADERS+= remove_reference.h
TTR_HEADERS+= remove_volatile.h
TTR_HEADERS+= type_identity.h
+TTR_HEADERS+= type_list.h
TTR_HEADERS+= underlying_type.h
TTR_HEADERS+= void_t.h
.for hdr in ${TTR_HEADERS}