author     Dimitry Andric <dim@FreeBSD.org>  2023-04-17 17:14:23 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2023-07-23 17:38:08 +0000
commit     49071a9c083dd7841f8cdf5a1f7023a945262e2d (patch)
tree       802cd3df44649a54b04f9a19c6dee2fbb131b6d0
parent     320d4fb58b6b1c6a0c7ffeab3d4672d1479d5e17 (diff)
Merge llvm-project release/16.x llvmorg-16.0.1-0-gcd89023f7979
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-16.0.1-0-gcd89023f7979 (aka 16.0.1 release).

PR:             271047
MFC after:      1 month

(cherry picked from commit 1ac55f4cb0001fed92329746c730aa9a947c09a5)
-rw-r--r--  contrib/llvm-project/clang/include/clang-c/Index.h | 2
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td | 7
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td | 5
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/arm_sve.td | 41
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td | 102
-rw-r--r--  contrib/llvm-project/clang/include/clang/Driver/Options.td | 2
-rwxr-xr-x  contrib/llvm-project/clang/include/clang/Format/Format.h | 39
-rw-r--r--  contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h | 184
-rw-r--r--  contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h | 10
-rw-r--r--  contrib/llvm-project/clang/include/clang/Sema/Sema.h | 38
-rw-r--r--  contrib/llvm-project/clang/include/clang/Sema/SemaInternal.h | 4
-rw-r--r--  contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h | 4
-rw-r--r--  contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h | 46
-rw-r--r--  contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h | 6
-rw-r--r--  contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h | 27
-rw-r--r--  contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h | 6
-rw-r--r--  contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h | 29
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ExprConstant.cpp | 2
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Type.cpp | 12
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp | 6
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/PPC.h | 8
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp | 8
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h | 5
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp | 11
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 21
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp | 98
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Driver.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp | 93
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp | 11
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp | 15
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp | 71
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h | 14
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp | 10
-rw-r--r--  contrib/llvm-project/clang/lib/Format/Format.cpp | 8
-rw-r--r--  contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp | 56
-rw-r--r--  contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp | 5
-rw-r--r--  contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp | 31
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp | 9
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/immintrin.h | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/smmintrin.h | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp | 37
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp | 44
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp | 12
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp | 11
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp | 5
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp | 8
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp | 17
-rwxr-xr-x  contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp | 27
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp | 34
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp | 10
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaModule.cpp | 13
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp | 5
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp | 11
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp | 7
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp | 234
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaType.cpp | 6
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/TreeTransform.h | 3
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp | 2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 13
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp | 8
-rw-r--r--  contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp | 90
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp | 150
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp | 4
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp | 33
-rw-r--r--  contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp | 2
-rw-r--r--  contrib/llvm-project/libcxx/include/__algorithm/ranges_binary_search.h | 4
-rw-r--r--  contrib/llvm-project/libcxx/include/__algorithm/sort.h | 578
-rw-r--r--  contrib/llvm-project/libcxx/include/__config | 38
-rw-r--r--  contrib/llvm-project/libcxx/include/__expected/expected.h | 7
-rw-r--r--  contrib/llvm-project/libcxx/include/__format/concepts.h | 5
-rw-r--r--  contrib/llvm-project/libcxx/include/__format/format_functions.h | 8
-rw-r--r--  contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h | 3
-rw-r--r--  contrib/llvm-project/libcxx/include/__functional/function.h | 38
-rw-r--r--  contrib/llvm-project/libcxx/include/__functional/hash.h | 4
-rw-r--r--  contrib/llvm-project/libcxx/include/__memory/construct_at.h | 10
-rw-r--r--  contrib/llvm-project/libcxx/include/__memory/shared_ptr.h | 218
-rw-r--r--  contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h | 34
-rw-r--r--  contrib/llvm-project/libcxx/include/__memory_resource/polymorphic_allocator.h | 2
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/elements_view.h | 118
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/filter_view.h | 50
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/iota_view.h | 432
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/istream_view.h | 26
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/join_view.h | 96
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/split_view.h | 52
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/take_while_view.h | 30
-rw-r--r--  contrib/llvm-project/libcxx/include/__ranges/transform_view.h | 127
-rw-r--r--  contrib/llvm-project/libcxx/include/__type_traits/add_pointer.h | 4
-rw-r--r--  contrib/llvm-project/libcxx/include/__type_traits/remove_pointer.h | 4
-rw-r--r--  contrib/llvm-project/libcxx/include/__utility/exception_guard.h | 47
-rw-r--r--  contrib/llvm-project/libcxx/include/any | 10
-rw-r--r--  contrib/llvm-project/libcxx/include/module.modulemap.in | 16
-rw-r--r--  contrib/llvm-project/libcxx/include/source_location | 6
-rw-r--r--  contrib/llvm-project/libcxx/include/version | 4
-rw-r--r--  contrib/llvm-project/libunwind/include/libunwind.modulemap | 3
-rw-r--r--  contrib/llvm-project/libunwind/include/unwind.h | 4
-rw-r--r--  contrib/llvm-project/libunwind/src/DwarfInstructions.hpp | 3
-rw-r--r--  contrib/llvm-project/libunwind/src/UnwindRegistersRestore.S | 18
-rw-r--r--  contrib/llvm-project/libunwind/src/UnwindRegistersSave.S | 11
-rw-r--r--  contrib/llvm-project/lld/COFF/MinGW.cpp | 3
-rw-r--r--  contrib/llvm-project/lld/ELF/Arch/RISCV.cpp | 6
-rw-r--r--  contrib/llvm-project/lld/ELF/ICF.cpp | 10
-rw-r--r--  contrib/llvm-project/lld/ELF/Relocations.cpp | 10
-rw-r--r--  contrib/llvm-project/lld/ELF/SymbolTable.cpp | 4
-rw-r--r--  contrib/llvm-project/lld/docs/ReleaseNotes.rst | 17
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.cpp | 164
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.h | 7
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp | 143
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_mips64.cpp | 4
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_powerpc.cpp | 4
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h | 11
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVElement.h | 1
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h | 7
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h | 4
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h | 6
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td | 31
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h | 16
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h | 3
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Support/ExitCodes.h | 6
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h | 6
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Support/Signals.h | 11
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/TargetParser/RISCVTargetParser.h | 5
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/TargetParser/Triple.h | 8
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h | 12
-rw-r--r--  contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp | 13
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/IfConversion.cpp | 9
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 13
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/DebugInfo/LogicalView/Core/LVSupport.cpp | 6
-rw-r--r--  contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp | 27
-rw-r--r--  contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp | 19
-rw-r--r--  contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp | 1
-rw-r--r--  contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp | 35
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Verifier.cpp | 6
-rw-r--r--  contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp | 7
-rw-r--r--  contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp | 14
-rw-r--r--  contrib/llvm-project/llvm/lib/Passes/PassRegistry.def | 1
-rw-r--r--  contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp | 110
-rw-r--r--  contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp | 19
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_unix.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_gnu.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm | 12
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_unix.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_gnu.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm | 36
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_impl.h | 10
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_unix.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_gnu.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm | 36
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_unix.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_gnu.S | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm | 36
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/BLAKE3/llvm_blake3_prefix.h | 41
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Parallel.cpp | 8
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp | 63
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Windows/Path.inc | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Windows/Signals.inc | 23
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Z3Solver.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp | 12
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 14
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td | 52
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp | 28
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp | 46
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.h | 3
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp | 3
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp | 10
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 22
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp | 5
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 3
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 121
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.h | 9
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.td | 9
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetMachine.cpp | 7
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86.td | 3
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 5
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp | 101
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h | 6
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86InstrCompiler.td | 6
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86InstrFPStack.td | 15
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86InstrSSE.td | 17
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86PfmCounters.td | 15
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver4.td | 1957
-rw-r--r--  contrib/llvm-project/llvm/lib/TargetParser/ARMTargetParser.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp | 6
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp | 22
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp | 32
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 79
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 16
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp | 3
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 15
-rw-r--r--  contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp | 32
-rw-r--r--  contrib/llvm-project/llvm/tools/llvm-objdump/ObjdumpOpts.td | 4
-rw-r--r--  contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp | 4
-rw-r--r--  contrib/llvm-project/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp | 8
-rw-r--r--  contrib/llvm-project/openmp/runtime/src/kmp_os.h | 2
-rw-r--r--  contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp | 6
-rw-r--r--  contrib/llvm-project/openmp/runtime/src/kmp_safe_c_api.h | 2
-rw-r--r--  contrib/llvm-project/openmp/runtime/src/kmp_str.cpp | 8
-rw-r--r--  contrib/llvm-project/openmp/runtime/src/z_Linux_asm.S | 5
-rw-r--r--  lib/clang/include/VCSVersion.inc | 6
-rw-r--r--  lib/clang/include/clang/Basic/Version.inc | 6
-rw-r--r--  lib/clang/include/lld/Common/Version.inc | 2
-rw-r--r--  lib/clang/include/lldb/Version/Version.inc | 6
-rw-r--r--  lib/clang/include/llvm/Config/config.h | 9
-rw-r--r--  lib/clang/include/llvm/Config/llvm-config.h | 4
-rw-r--r--  lib/clang/include/llvm/Support/VCSRevision.h | 2
-rw-r--r--  usr.bin/clang/llvm-cov/Makefile | 1
227 files changed, 5335 insertions, 2401 deletions
diff --git a/contrib/llvm-project/clang/include/clang-c/Index.h b/contrib/llvm-project/clang/include/clang-c/Index.h
index fd758ddde085..a3e54285f89f 100644
--- a/contrib/llvm-project/clang/include/clang-c/Index.h
+++ b/contrib/llvm-project/clang/include/clang-c/Index.h
@@ -34,7 +34,7 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 62
+#define CINDEX_VERSION_MINOR 63
#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index c367a34b762b..e99beb3a7636 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -1052,7 +1052,7 @@ def err_lambda_template_parameter_list_empty : Error<
// C++2b static lambdas
def err_static_lambda: ExtWarn<
"static lambdas are a C++2b extension">, InGroup<CXX2b>;
-def warn_cxx20_compat_static_lambda: ExtWarn<
+def warn_cxx20_compat_static_lambda : Warning<
"static lambdas are incompatible with C++ standards before C++2b">,
InGroup<CXXPre2bCompat>, DefaultIgnore;
def err_static_mutable_lambda : Error<
@@ -1607,11 +1607,6 @@ def err_import_in_wrong_fragment : Error<
def err_export_empty : Error<"export declaration cannot be empty">;
}
-def ext_offsetof_member_designator : Extension<
- "using %select{a member access expression|an array subscript expression}0 "
- "within '%select{__builtin_offsetof|offsetof}1' is a Clang extension">,
- InGroup<GNUOffsetofExtensions>;
-
let CategoryName = "Generics Issue" in {
def err_objc_expected_type_parameter : Error<
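For orientation only (my own example, not taken from this commit): the static-lambda diagnostics adjusted above apply to code like the following, which pre-C++2b language modes report via err_static_lambda or warn_cxx20_compat_static_lambda.

// Illustration only: a C++2b static lambda; its call operator is a static
// member, so it cannot capture anything.
auto add = [](int a, int b) static { return a + b; };
int three = add(1, 2);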
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index de56e3e1566b..bfe582d8252f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -9138,8 +9138,9 @@ def err_operator_overload_static : Error<
def err_operator_overload_default_arg : Error<
"parameter of overloaded %0 cannot have a default argument">;
-def ext_subscript_overload : ExtWarn<
- "overloaded %0 with %select{no|a defaulted|more than one}1 parameter is a C++2b extension">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+def ext_subscript_overload : Warning<
+ "overloaded %0 with %select{no|a defaulted|more than one}1 parameter is a "
+ "C++2b extension">, InGroup<CXXPre2bCompat>, DefaultIgnore;
def error_subscript_overload : Error<
"overloaded %0 cannot have %select{no|a defaulted|more than one}1 parameter before C++2b">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
index e910036117b7..e547bbd34b5e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
@@ -1249,16 +1249,37 @@ def SVZIP1_BF16 : SInst<"svzip1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve
def SVZIP2_BF16 : SInst<"svzip2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2">;
}
-def SVREV_B : SInst<"svrev_{d}", "PP", "PcPsPiPl", MergeNone, "aarch64_sve_rev">;
-def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
-def SVTRN1_B : SInst<"svtrn1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn1">;
-def SVTRN2_B : SInst<"svtrn2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn2">;
-def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
-def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
-def SVUZP1_B : SInst<"svuzp1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp1">;
-def SVUZP2_B : SInst<"svuzp2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp2">;
-def SVZIP1_B : SInst<"svzip1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip1">;
-def SVZIP2_B : SInst<"svzip2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip2">;
+def SVREV_B8 : SInst<"svrev_b8", "PP", "Pc", MergeNone, "aarch64_sve_rev">;
+def SVREV_B16 : SInst<"svrev_b16", "PP", "Pc", MergeNone, "aarch64_sve_rev_b16", [IsOverloadNone]>;
+def SVREV_B32 : SInst<"svrev_b32", "PP", "Pc", MergeNone, "aarch64_sve_rev_b32", [IsOverloadNone]>;
+def SVREV_B64 : SInst<"svrev_b64", "PP", "Pc", MergeNone, "aarch64_sve_rev_b64", [IsOverloadNone]>;
+def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
+def SVTRN1_B8 : SInst<"svtrn1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_trn1">;
+def SVTRN1_B16 : SInst<"svtrn1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b16", [IsOverloadNone]>;
+def SVTRN1_B32 : SInst<"svtrn1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b32", [IsOverloadNone]>;
+def SVTRN1_B64 : SInst<"svtrn1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b64", [IsOverloadNone]>;
+def SVTRN2_B8 : SInst<"svtrn2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_trn2">;
+def SVTRN2_B16 : SInst<"svtrn2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b16", [IsOverloadNone]>;
+def SVTRN2_B32 : SInst<"svtrn2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b32", [IsOverloadNone]>;
+def SVTRN2_B64 : SInst<"svtrn2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b64", [IsOverloadNone]>;
+def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
+def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
+def SVUZP1_B8 : SInst<"svuzp1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1">;
+def SVUZP1_B16 : SInst<"svuzp1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b16", [IsOverloadNone]>;
+def SVUZP1_B32 : SInst<"svuzp1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b32", [IsOverloadNone]>;
+def SVUZP1_B64 : SInst<"svuzp1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b64", [IsOverloadNone]>;
+def SVUZP2_B8 : SInst<"svuzp2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2">;
+def SVUZP2_B16 : SInst<"svuzp2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b16", [IsOverloadNone]>;
+def SVUZP2_B32 : SInst<"svuzp2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b32", [IsOverloadNone]>;
+def SVUZP2_B64 : SInst<"svuzp2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b64", [IsOverloadNone]>;
+def SVZIP1_B8 : SInst<"svzip1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_zip1">;
+def SVZIP1_B16 : SInst<"svzip1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b16", [IsOverloadNone]>;
+def SVZIP1_B32 : SInst<"svzip1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b32", [IsOverloadNone]>;
+def SVZIP1_B64 : SInst<"svzip1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b64", [IsOverloadNone]>;
+def SVZIP2_B : SInst<"svzip2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_zip2">;
+def SVZIP2_B16 : SInst<"svzip2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b16", [IsOverloadNone]>;
+def SVZIP2_B32 : SInst<"svzip2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b32", [IsOverloadNone]>;
+def SVZIP2_B64 : SInst<"svzip2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b64", [IsOverloadNone]>;
////////////////////////////////////////////////////////////////////////////////
// Predicate creation
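For illustration only (not part of this commit): with the change above, the per-element-size SVE predicate operations become distinct intrinsics rather than one overloaded form, so callers name the element size explicitly. A minimal sketch, assuming an SVE-enabled toolchain providing <arm_sve.h>:

#include <arm_sve.h>

// Interleave the even-numbered halfword predicate elements of two predicates;
// svtrn1_b16 corresponds to the SVTRN1_B16 definition added above.
svbool_t trn1_halfword_preds(svbool_t pa, svbool_t pb) {
  return svtrn1_b16(pa, pb);
}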
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
index c63cba9aa459..b23e26ecaa57 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
@@ -1539,7 +1539,7 @@ enum RVV_CSR {
};
static __inline__ __attribute__((__always_inline__, __nodebug__))
-unsigned long vread_csr(enum RVV_CSR __csr) {
+unsigned long __riscv_vread_csr(enum RVV_CSR __csr) {
unsigned long __rv = 0;
switch (__csr) {
case RVV_VSTART:
@@ -1559,7 +1559,7 @@ unsigned long vread_csr(enum RVV_CSR __csr) {
}
static __inline__ __attribute__((__always_inline__, __nodebug__))
-void vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
+void __riscv_vwrite_csr(enum RVV_CSR __csr, unsigned long __value) {
switch (__csr) {
case RVV_VSTART:
__asm__ __volatile__ ("csrw\tvstart, %z0" : : "rJ"(__value) : "memory");
@@ -1580,7 +1580,7 @@ def vread_vwrite_csr: RVVHeader;
let HeaderCode =
[{
-#define vlenb() __builtin_rvv_vlenb()
+#define __riscv_vlenb() __builtin_rvv_vlenb()
}] in
def vlenb_macro: RVVHeader;
@@ -1611,62 +1611,62 @@ let HasBuiltinAlias = false, HasVL = false, HasMasked = false,
// and LMUL.
let HeaderCode =
[{
-#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
-#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
-#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
-#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
-#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
-#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
-
-#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
-#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
-#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
-#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
-#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
-
-#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
-#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
-#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
-#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
#if __riscv_v_elen >= 64
-#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
-#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
-#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
-
-#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
-#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
-#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
-#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+
+#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
#endif
-#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
-#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
-#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
-#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
-#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
-#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
-#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
-#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
-#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
-#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
-#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
-#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
-#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
-#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
-#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
#if __riscv_v_elen >= 64
-#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
-#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
-#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
-
-#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
-#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
-#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
-#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
+#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+
+#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
#endif
}] in
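For illustration only (not part of this commit): the header code above gives the RVV vsetvl/vsetvlmax helpers (and vread_csr, vwrite_csr, vlenb) a __riscv_ prefix, so user code updates its spelling. A minimal sketch, assuming a toolchain with the V extension and <riscv_vector.h>:

#include <riscv_vector.h>

// Pick the vector length for 32-bit elements at LMUL=1.
size_t pick_vl(size_t avl) {
  // Previously spelled vsetvl_e32m1(avl); with this release:
  return __riscv_vsetvl_e32m1(avl);
}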
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index 23752823e88f..652c15afcce8 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -4559,6 +4559,8 @@ def mnvs : Flag<["-"], "mnvs">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of new-value stores">;
def mno_nvs : Flag<["-"], "mno-nvs">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of new-value stores">;
+def mcabac: Flag<["-"], "mcabac">, Group<m_hexagon_Features_Group>,
+ HelpText<"Enable CABAC instructions">;
// SPARC feature flags
def mfpu : Flag<["-"], "mfpu">, Group<m_sparc_Features_Group>;
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index 72efd3be1cc7..7a313460d888 100755
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
@@ -2500,6 +2500,10 @@ struct FormatStyle {
/// Decimal: 3
/// Hex: -1
/// \endcode
+ ///
+ /// You can also specify a minimum number of digits (``BinaryMinDigits``,
+ /// ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must
+ /// have in order for the separators to be inserted.
struct IntegerLiteralSeparatorStyle {
/// Format separators in binary literals.
/// \code{.text}
@@ -2509,6 +2513,14 @@ struct FormatStyle {
/// /* 4: */ b = 0b1001'1110'1101;
/// \endcode
int8_t Binary;
+ /// Format separators in binary literals with a minimum number of digits.
+ /// \code{.text}
+ /// // Binary: 3
+ /// // BinaryMinDigits: 7
+ /// b1 = 0b101101;
+ /// b2 = 0b1'101'101;
+ /// \endcode
+ int8_t BinaryMinDigits;
/// Format separators in decimal literals.
/// \code{.text}
/// /* -1: */ d = 18446744073709550592ull;
@@ -2516,6 +2528,14 @@ struct FormatStyle {
/// /* 3: */ d = 18'446'744'073'709'550'592ull;
/// \endcode
int8_t Decimal;
+ /// Format separators in decimal literals with a minimum number of digits.
+ /// \code{.text}
+ /// // Decimal: 3
+ /// // DecimalMinDigits: 5
+ /// d1 = 2023;
+ /// d2 = 10'000;
+ /// \endcode
+ int8_t DecimalMinDigits;
/// Format separators in hexadecimal literals.
/// \code{.text}
/// /* -1: */ h = 0xDEADBEEFDEADBEEFuz;
@@ -2523,6 +2543,20 @@ struct FormatStyle {
/// /* 2: */ h = 0xDE'AD'BE'EF'DE'AD'BE'EFuz;
/// \endcode
int8_t Hex;
+ /// Format separators in hexadecimal literals with a minimum number of
+ /// digits.
+ /// \code{.text}
+ /// // Hex: 2
+ /// // HexMinDigits: 6
+ /// h1 = 0xABCDE;
+ /// h2 = 0xAB'CD'EF;
+ /// \endcode
+ int8_t HexMinDigits;
+ bool operator==(const IntegerLiteralSeparatorStyle &R) const {
+ return Binary == R.Binary && BinaryMinDigits == R.BinaryMinDigits &&
+ Decimal == R.Decimal && DecimalMinDigits == R.DecimalMinDigits &&
+ Hex == R.Hex && HexMinDigits == R.HexMinDigits;
+ }
};
/// Format integer literal separators (``'`` for C++ and ``_`` for C#, Java,
@@ -4212,10 +4246,7 @@ struct FormatStyle {
IndentWrappedFunctionNames == R.IndentWrappedFunctionNames &&
InsertBraces == R.InsertBraces &&
InsertNewlineAtEOF == R.InsertNewlineAtEOF &&
- IntegerLiteralSeparator.Binary == R.IntegerLiteralSeparator.Binary &&
- IntegerLiteralSeparator.Decimal ==
- R.IntegerLiteralSeparator.Decimal &&
- IntegerLiteralSeparator.Hex == R.IntegerLiteralSeparator.Hex &&
+ IntegerLiteralSeparator == R.IntegerLiteralSeparator &&
JavaImportGroups == R.JavaImportGroups &&
JavaScriptQuotes == R.JavaScriptQuotes &&
JavaScriptWrapImports == R.JavaScriptWrapImports &&
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index f383a2e5b530..322626802eab 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -193,11 +193,6 @@ class Preprocessor {
LangOptions::FPEvalMethodKind CurrentFPEvalMethod =
LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine;
- // Keeps the value of the last evaluation method before a
- // `pragma float_control (precise,off) is applied.
- LangOptions::FPEvalMethodKind LastFPEvalMethod =
- LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine;
-
// The most recent pragma location where the floating point evaluation
// method was modified. This is used to determine whether the
// 'pragma clang fp eval_method' was used whithin the current scope.
@@ -313,6 +308,9 @@ private:
/// The import path for named module that we're currently processing.
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> NamedModuleImportPath;
+ /// Whether the import is an `@import` or a standard c++ modules import.
+ bool IsAtImport = false;
+
/// Whether the last token we lexed was an '@'.
bool LastTokenWasAt = false;
@@ -456,6 +454,144 @@ private:
TrackGMF TrackGMFState = TrackGMF::BeforeGMFIntroducer;
+ /// Track the status of the c++20 module decl.
+ ///
+ /// module-declaration:
+ /// 'export'[opt] 'module' module-name module-partition[opt]
+ /// attribute-specifier-seq[opt] ';'
+ ///
+ /// module-name:
+ /// module-name-qualifier[opt] identifier
+ ///
+ /// module-partition:
+ /// ':' module-name-qualifier[opt] identifier
+ ///
+ /// module-name-qualifier:
+ /// identifier '.'
+ /// module-name-qualifier identifier '.'
+ ///
+ /// Transition state:
+ ///
+ /// NotAModuleDecl --- export ---> FoundExport
+ /// NotAModuleDecl --- module ---> ImplementationCandidate
+ /// FoundExport --- module ---> InterfaceCandidate
+ /// ImplementationCandidate --- Identifier ---> ImplementationCandidate
+ /// ImplementationCandidate --- period ---> ImplementationCandidate
+ /// ImplementationCandidate --- colon ---> ImplementationCandidate
+ /// InterfaceCandidate --- Identifier ---> InterfaceCandidate
+ /// InterfaceCandidate --- period ---> InterfaceCandidate
+ /// InterfaceCandidate --- colon ---> InterfaceCandidate
+ /// ImplementationCandidate --- Semi ---> NamedModuleImplementation
+ /// NamedModuleInterface --- Semi ---> NamedModuleInterface
+ /// NamedModuleImplementation --- Anything ---> NamedModuleImplementation
+ /// NamedModuleInterface --- Anything ---> NamedModuleInterface
+ ///
+ /// FIXME: We haven't handle attribute-specifier-seq here. It may not be bad
+ /// soon since we don't support any module attributes yet.
+ class ModuleDeclSeq {
+ enum ModuleDeclState : int {
+ NotAModuleDecl,
+ FoundExport,
+ InterfaceCandidate,
+ ImplementationCandidate,
+ NamedModuleInterface,
+ NamedModuleImplementation,
+ };
+
+ public:
+ ModuleDeclSeq() : State(NotAModuleDecl) {}
+
+ void handleExport() {
+ if (State == NotAModuleDecl)
+ State = FoundExport;
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleModule() {
+ if (State == FoundExport)
+ State = InterfaceCandidate;
+ else if (State == NotAModuleDecl)
+ State = ImplementationCandidate;
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleIdentifier(IdentifierInfo *Identifier) {
+ if (isModuleCandidate() && Identifier)
+ Name += Identifier->getName().str();
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleColon() {
+ if (isModuleCandidate())
+ Name += ":";
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handlePeriod() {
+ if (isModuleCandidate())
+ Name += ".";
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleSemi() {
+ if (!Name.empty() && isModuleCandidate()) {
+ if (State == InterfaceCandidate)
+ State = NamedModuleInterface;
+ else if (State == ImplementationCandidate)
+ State = NamedModuleImplementation;
+ else
+ llvm_unreachable("Unimaged ModuleDeclState.");
+ } else if (!isNamedModule())
+ reset();
+ }
+
+ void handleMisc() {
+ if (!isNamedModule())
+ reset();
+ }
+
+ bool isModuleCandidate() const {
+ return State == InterfaceCandidate || State == ImplementationCandidate;
+ }
+
+ bool isNamedModule() const {
+ return State == NamedModuleInterface ||
+ State == NamedModuleImplementation;
+ }
+
+ bool isNamedInterface() const { return State == NamedModuleInterface; }
+
+ bool isImplementationUnit() const {
+ return State == NamedModuleImplementation && !getName().contains(':');
+ }
+
+ StringRef getName() const {
+ assert(isNamedModule() && "Can't get name from a non named module");
+ return Name;
+ }
+
+ StringRef getPrimaryName() const {
+ assert(isNamedModule() && "Can't get name from a non named module");
+ return getName().split(':').first;
+ }
+
+ void reset() {
+ Name.clear();
+ State = NotAModuleDecl;
+ }
+
+ private:
+ ModuleDeclState State;
+ std::string Name;
+ };
+
+ ModuleDeclSeq ModuleDeclState;
+
/// Whether the module import expects an identifier next. Otherwise,
/// it expects a '.' or ';'.
bool ModuleImportExpectsIdentifier = false;
@@ -2194,14 +2330,6 @@ public:
return LastFPEvalPragmaLocation;
}
- LangOptions::FPEvalMethodKind getLastFPEvalMethod() const {
- return LastFPEvalMethod;
- }
-
- void setLastFPEvalMethod(LangOptions::FPEvalMethodKind Val) {
- LastFPEvalMethod = Val;
- }
-
void setCurrentFPEvalMethod(SourceLocation PragmaLoc,
LangOptions::FPEvalMethodKind Val) {
assert(Val != LangOptions::FEM_UnsetOnCommandLine &&
@@ -2225,6 +2353,36 @@ public:
/// Retrieves the module whose implementation we're current compiling, if any.
Module *getCurrentModuleImplementation();
+ /// If we are preprocessing a named module.
+ bool isInNamedModule() const { return ModuleDeclState.isNamedModule(); }
+
+ /// If we are proprocessing a named interface unit.
+ /// Note that a module implementation partition is not considered as an
+ /// named interface unit here although it is importable
+ /// to ease the parsing.
+ bool isInNamedInterfaceUnit() const {
+ return ModuleDeclState.isNamedInterface();
+ }
+
+ /// Get the named module name we're preprocessing.
+ /// Requires we're preprocessing a named module.
+ StringRef getNamedModuleName() const { return ModuleDeclState.getName(); }
+
+ /// If we are implementing an implementation module unit.
+ /// Note that the module implementation partition is not considered as an
+ /// implementation unit.
+ bool isInImplementationUnit() const {
+ return ModuleDeclState.isImplementationUnit();
+ }
+
+ /// If we're importing a standard C++20 Named Modules.
+ bool isInImportingCXXNamedModules() const {
+ // NamedModuleImportPath will be non-empty only if we're importing
+ // Standard C++ named modules.
+ return !NamedModuleImportPath.empty() && getLangOpts().CPlusPlusModules &&
+ !IsAtImport;
+ }
+
/// Allocate a new MacroInfo object with the provided SourceLocation.
MacroInfo *AllocateMacroInfo(SourceLocation L);
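For orientation only (not part of this commit), the kinds of module declarations the ModuleDeclSeq state machine documented above classifies; each line below would come from a separate translation unit.

// Illustration only, following the transition table in the comment above.
export module app.core;       // 'export' -> FoundExport, 'module' -> InterfaceCandidate,
                              // identifiers build the name, ';' -> NamedModuleInterface
module app.core;              // 'module' -> ImplementationCandidate,
                              // ';' -> NamedModuleImplementation (isInImplementationUnit())
export module app.core:part;  // ':' and identifiers extend the name;
                              // getPrimaryName() would return "app.core"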
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
index a95fe5686009..69fe2c541607 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
@@ -506,16 +506,8 @@ public:
assert(isTypeRep((TST) TypeSpecType) && "DeclSpec does not store a type");
return TypeRep;
}
- // Returns the underlying decl, if any.
Decl *getRepAsDecl() const {
- auto *D = getRepAsFoundDecl();
- if (const auto *Using = dyn_cast_or_null<UsingShadowDecl>(D))
- return Using->getTargetDecl();
- return D;
- }
- // Returns the originally found decl, if any.
- Decl *getRepAsFoundDecl() const {
- assert(isDeclRep((TST)TypeSpecType) && "DeclSpec does not store a decl");
+ assert(isDeclRep((TST) TypeSpecType) && "DeclSpec does not store a decl");
return DeclRep;
}
Expr *getRepAsExpr() const {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index 9ab4146aaefe..e57955f16bdd 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -238,11 +238,9 @@ namespace threadSafety {
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
-using UnexpandedParameterPack = std::pair<
- llvm::PointerUnion<
- const TemplateTypeParmType *, const SubstTemplateTypeParmPackType *,
- const SubstNonTypeTemplateParmPackExpr *, const NamedDecl *>,
- SourceLocation>;
+typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType *, NamedDecl *>,
+ SourceLocation>
+ UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
@@ -3330,9 +3328,7 @@ public:
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
- OffsetOfKind OOK,
- UsingShadowDecl*& FoundUsingShadow,
- SkipBodyInfo *SkipBody = nullptr);
+ OffsetOfKind OOK, SkipBodyInfo *SkipBody = nullptr);
DeclResult ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
@@ -7284,24 +7280,34 @@ private:
private:
// The current stack of constraint satisfactions, so we can exit-early.
- llvm::SmallVector<llvm::FoldingSetNodeID, 10> SatisfactionStack;
+ using SatisfactionStackEntryTy =
+ std::pair<const NamedDecl *, llvm::FoldingSetNodeID>;
+ llvm::SmallVector<SatisfactionStackEntryTy, 10>
+ SatisfactionStack;
public:
- void PushSatisfactionStackEntry(const llvm::FoldingSetNodeID &ID) {
- SatisfactionStack.push_back(ID);
+ void PushSatisfactionStackEntry(const NamedDecl *D,
+ const llvm::FoldingSetNodeID &ID) {
+ const NamedDecl *Can = cast<NamedDecl>(D->getCanonicalDecl());
+ SatisfactionStack.emplace_back(Can, ID);
}
void PopSatisfactionStackEntry() { SatisfactionStack.pop_back(); }
- bool SatisfactionStackContains(const llvm::FoldingSetNodeID &ID) const {
- return llvm::find(SatisfactionStack, ID) != SatisfactionStack.end();
+ bool SatisfactionStackContains(const NamedDecl *D,
+ const llvm::FoldingSetNodeID &ID) const {
+ const NamedDecl *Can = cast<NamedDecl>(D->getCanonicalDecl());
+ return llvm::find(SatisfactionStack,
+ SatisfactionStackEntryTy{Can, ID}) !=
+ SatisfactionStack.end();
}
// Resets the current SatisfactionStack for cases where we are instantiating
// constraints as a 'side effect' of normal instantiation in a way that is not
// indicative of recursive definition.
class SatisfactionStackResetRAII {
- llvm::SmallVector<llvm::FoldingSetNodeID, 10> BackupSatisfactionStack;
+ llvm::SmallVector<SatisfactionStackEntryTy, 10>
+ BackupSatisfactionStack;
Sema &SemaRef;
public:
@@ -7314,8 +7320,8 @@ public:
}
};
- void
- SwapSatisfactionStack(llvm::SmallVectorImpl<llvm::FoldingSetNodeID> &NewSS) {
+ void SwapSatisfactionStack(
+ llvm::SmallVectorImpl<SatisfactionStackEntryTy> &NewSS) {
SatisfactionStack.swap(NewSS);
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/SemaInternal.h b/contrib/llvm-project/clang/include/clang/Sema/SemaInternal.h
index 4eca50919dc7..842eec099540 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/SemaInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/SemaInternal.h
@@ -62,7 +62,7 @@ inline InheritableAttr *getDLLAttr(Decl *D) {
}
/// Retrieve the depth and index of a template parameter.
-inline std::pair<unsigned, unsigned> getDepthAndIndex(const NamedDecl *ND) {
+inline std::pair<unsigned, unsigned> getDepthAndIndex(NamedDecl *ND) {
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
return std::make_pair(TTP->getDepth(), TTP->getIndex());
@@ -79,7 +79,7 @@ getDepthAndIndex(UnexpandedParameterPack UPP) {
if (const auto *TTP = UPP.first.dyn_cast<const TemplateTypeParmType *>())
return std::make_pair(TTP->getDepth(), TTP->getIndex());
- return getDepthAndIndex(UPP.first.get<const NamedDecl *>());
+ return getDepthAndIndex(UPP.first.get<NamedDecl *>());
}
class TypoCorrectionConsumer : public VisibleDeclConsumer {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
index ea05b9f8ee3f..fcc9c02999b3 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
@@ -454,7 +454,9 @@ public:
llvm::SMTExprRef OperandExp =
getSymExpr(Solver, Ctx, USE->getOperand(), &OperandTy, hasComparison);
llvm::SMTExprRef UnaryExp =
- fromUnOp(Solver, USE->getOpcode(), OperandExp);
+ OperandTy->isRealFloatingType()
+ ? fromFloatUnOp(Solver, USE->getOpcode(), OperandExp)
+ : fromUnOp(Solver, USE->getOpcode(), OperandExp);
// Currently, without the `support-symbolic-integer-casts=true` option,
// we do not emit `SymbolCast`s for implicit casts.
diff --git a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index fc53d70019c5..bf31dced98b2 100644
--- a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -92,25 +92,24 @@ enum class TypeModifier : uint8_t {
LLVM_MARK_AS_BITMASK_ENUM(LMUL1),
};
-struct Policy {
- bool IsUnspecified = false;
+class Policy {
+public:
enum PolicyType {
Undisturbed,
Agnostic,
};
- PolicyType TailPolicy = Agnostic;
- PolicyType MaskPolicy = Agnostic;
- bool HasTailPolicy, HasMaskPolicy;
- Policy(bool HasTailPolicy, bool HasMaskPolicy)
- : IsUnspecified(true), HasTailPolicy(HasTailPolicy),
- HasMaskPolicy(HasMaskPolicy) {}
- Policy(PolicyType TailPolicy, bool HasTailPolicy, bool HasMaskPolicy)
- : TailPolicy(TailPolicy), HasTailPolicy(HasTailPolicy),
- HasMaskPolicy(HasMaskPolicy) {}
- Policy(PolicyType TailPolicy, PolicyType MaskPolicy, bool HasTailPolicy,
- bool HasMaskPolicy)
- : TailPolicy(TailPolicy), MaskPolicy(MaskPolicy),
- HasTailPolicy(HasTailPolicy), HasMaskPolicy(HasMaskPolicy) {}
+
+private:
+ // The default assumption for an RVV instruction is TAMA, as an undisturbed
+ // policy generally will affect the performance of an out-of-order core.
+ const PolicyType TailPolicy = Agnostic;
+ const PolicyType MaskPolicy = Agnostic;
+
+public:
+ Policy() = default;
+ Policy(PolicyType TailPolicy) : TailPolicy(TailPolicy) {}
+ Policy(PolicyType TailPolicy, PolicyType MaskPolicy)
+ : TailPolicy(TailPolicy), MaskPolicy(MaskPolicy) {}
bool isTAMAPolicy() const {
return TailPolicy == Agnostic && MaskPolicy == Agnostic;
@@ -136,17 +135,8 @@ struct Policy {
bool isMUPolicy() const { return MaskPolicy == Undisturbed; }
- bool hasTailPolicy() const { return HasTailPolicy; }
-
- bool hasMaskPolicy() const { return HasMaskPolicy; }
-
- bool isUnspecified() const { return IsUnspecified; }
-
bool operator==(const Policy &Other) const {
- return IsUnspecified == Other.IsUnspecified &&
- TailPolicy == Other.TailPolicy && MaskPolicy == Other.MaskPolicy &&
- HasTailPolicy == Other.HasTailPolicy &&
- HasMaskPolicy == Other.HasMaskPolicy;
+ return TailPolicy == Other.TailPolicy && MaskPolicy == Other.MaskPolicy;
}
bool operator!=(const Policy &Other) const { return !(*this == Other); }
@@ -422,7 +412,6 @@ public:
return IntrinsicTypes;
}
Policy getPolicyAttrs() const {
- assert(PolicyAttrs.IsUnspecified == false);
return PolicyAttrs;
}
unsigned getPolicyAttrsBits() const {
@@ -431,8 +420,6 @@ public:
// The 1st bit simulates the `vma` of RVV
// int PolicyAttrs = 0;
- assert(PolicyAttrs.IsUnspecified == false);
-
if (PolicyAttrs.isTUMAPolicy())
return 2;
if (PolicyAttrs.isTAMAPolicy())
@@ -459,8 +446,7 @@ public:
unsigned NF, PolicyScheme DefaultScheme,
Policy PolicyAttrs);
- static llvm::SmallVector<Policy>
- getSupportedUnMaskedPolicies(bool HasTailPolicy, bool HasMaskPolicy);
+ static llvm::SmallVector<Policy> getSupportedUnMaskedPolicies();
static llvm::SmallVector<Policy>
getSupportedMaskedPolicies(bool HasTailPolicy, bool HasMaskPolicy);
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
index a8cb15847b78..109cf049a652 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
@@ -35,9 +35,13 @@ enum class ScanningOutputFormat {
/// intermodule dependency information.
Make,
- /// This outputs the full module dependency graph suitable for use for
+ /// This outputs the full clang module dependency graph suitable for use for
/// explicitly building modules.
Full,
+
+ /// This outputs the dependency graph for standard c++ modules in P1689R5
+ /// format.
+ P1689,
};
/// The dependency scanning service contains shared configuration and state that
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index 6af878dbda95..b1a4df141edc 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -67,6 +67,12 @@ struct FullDependenciesResult {
std::vector<ModuleDeps> DiscoveredModules;
};
+struct P1689Rule {
+ std::string PrimaryOutput;
+ std::optional<P1689ModuleInfo> Provides;
+ std::vector<P1689ModuleInfo> Requires;
+};
+
/// The high-level implementation of the dependency discovery tool that runs on
/// an individual worker thread.
class DependencyScanningTool {
@@ -87,9 +93,24 @@ public:
getDependencyFile(const std::vector<std::string> &CommandLine, StringRef CWD,
std::optional<StringRef> ModuleName = std::nullopt);
- /// Collect the full module dependency graph for the input, ignoring any
- /// modules which have already been seen. If \p ModuleName isn't empty, this
- /// function returns the full dependency information of module \p ModuleName.
+ /// Collect the module dependency in P1689 format for C++20 named modules.
+ ///
+ /// \param MakeformatOutput The output parameter for dependency information
+ /// in make format if the command line requires to generate make-format
+ /// dependency information by `-MD -MF <dep_file>`.
+ ///
+ /// \param MakeformatOutputPath The output parameter for the path to
+ /// \param MakeformatOutput.
+ ///
+ /// \returns A \c StringError with the diagnostic output if clang errors
+ /// occurred, P1689 dependency format rules otherwise.
+ llvm::Expected<P1689Rule>
+ getP1689ModuleDependencyFile(
+ const clang::tooling::CompileCommand &Command, StringRef CWD,
+ std::string &MakeformatOutput, std::string &MakeformatOutputPath);
+
+ /// Given a Clang driver command-line for a translation unit, gather the
+ /// modular dependencies and return the information needed for explicit build.
///
/// \param AlreadySeen This stores modules which have previously been
/// reported. Use the same instance for all calls to this
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
index 458c4d936c83..6edf2cbe6b53 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
@@ -41,7 +41,11 @@ class DependencyConsumer {
public:
virtual ~DependencyConsumer() {}
- virtual void handleBuildCommand(Command Cmd) = 0;
+ virtual void handleProvidedAndRequiredStdCXXModules(
+ std::optional<P1689ModuleInfo> Provided,
+ std::vector<P1689ModuleInfo> Requires) {}
+
+ virtual void handleBuildCommand(Command Cmd) {}
virtual void
handleDependencyOutputOpts(const DependencyOutputOptions &Opts) = 0;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index 6e4cec25961d..bce3e066372f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -62,6 +62,27 @@ struct ModuleID {
}
};
+/// P1689ModuleInfo - Represents the needed information of standard C++20
+/// modules for P1689 format.
+struct P1689ModuleInfo {
+ /// The name of the module. This may include `:` for partitions.
+ std::string ModuleName;
+
+ /// Optional. The source path to the module.
+ std::string SourcePath;
+
+ /// If this module is a standard c++ interface unit.
+ bool IsStdCXXModuleInterface = true;
+
+ enum class ModuleType {
+ NamedCXXModule
+ // To be supported
+ // AngleHeaderUnit,
+ // QuoteHeaderUnit
+ };
+ ModuleType Type = ModuleType::NamedCXXModule;
+};
+
/// An output from a module compilation, such as the path of the module file.
enum class ModuleOutputKind {
/// The module file (.pcm). Required.
@@ -181,7 +202,7 @@ public:
ModuleDepCollector(std::unique_ptr<DependencyOutputOptions> Opts,
CompilerInstance &ScanInstance, DependencyConsumer &C,
CompilerInvocation OriginalCI, bool OptimizeArgs,
- bool EagerLoadModules);
+ bool EagerLoadModules, bool IsStdModuleP1689Format);
void attachToPreprocessor(Preprocessor &PP) override;
void attachToASTReader(ASTReader &R) override;
@@ -219,6 +240,12 @@ private:
bool OptimizeArgs;
/// Whether to set up command-lines to load PCM files eagerly.
bool EagerLoadModules;
+ /// Whether we're generating dependency output in P1689 format
+ /// for standard C++ modules.
+ bool IsStdModuleP1689Format;
+
+ std::optional<P1689ModuleInfo> ProvidedStdCXXModule;
+ std::vector<P1689ModuleInfo> RequiredStdCXXModules;
/// Checks whether the module is known as being prebuilt.
bool isPrebuiltModule(const Module *M);
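For orientation, a minimal illustration (the values are hypothetical, the field names are from the struct above) of how a scanner might describe a partition interface unit such as `export module foo:part;`:

    P1689ModuleInfo describePartition() {
      P1689ModuleInfo Info;
      Info.ModuleName = "foo:part";       // ':' marks the partition
      Info.SourcePath = "foo-part.cppm";  // optional source path
      Info.IsStdCXXModuleInterface = true;
      Info.Type = P1689ModuleInfo::ModuleType::NamedCXXModule;
      return Info;
    }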
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index 0403165a8e55..464104139cb2 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -10916,7 +10916,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
for (unsigned I = OldElts; I < N; ++I)
Value->getArrayInitializedElt(I) = Filler;
- if (HasTrivialConstructor && N == FinalSize) {
+ if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) {
// If we have a trivial constructor, only evaluate it once and copy
// the result into all the array elements.
APValue &FirstResult = Value->getArrayInitializedElt(0);
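A plain C++20 snippet (not from the patch) of the kind of constant evaluation this code handles; whether it takes the copy shortcut depends on how the frontend models the value-initialization, and the added `FinalSize != 1` check simply keeps single-element arrays off that shortcut:

    struct Trivial { int x; };            // trivial default constructor

    constexpr bool evaluateArrayNew() {
      auto *P = new Trivial[4]();         // value-initialized array new
      bool AllZero = true;
      for (int I = 0; I < 4; ++I)
        AllZero &= (P[I].x == 0);
      delete[] P;
      return AllZero;
    }
    static_assert(evaluateArrayNew());    // requires -std=c++20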
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index a713d6e3bd03..54e62a1939f7 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -2487,11 +2487,13 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
return true;
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- // C++11 [class]p6:
- // A trivial class is a class that has a default constructor,
- // has no non-trivial default constructors, and is trivially
- // copyable.
- return ClassDecl->hasDefaultConstructor() &&
+ // C++20 [class]p6:
+ // A trivial class is a class that is trivially copyable, and
+ // has one or more eligible default constructors such that each is
+ // trivial.
+ // FIXME: We should merge this definition of triviality into
+ // CXXRecordDecl::isTrivial. Currently it computes the wrong thing.
+ return ClassDecl->hasTrivialDefaultConstructor() &&
!ClassDecl->hasNonTrivialDefaultConstructor() &&
ClassDecl->isTriviallyCopyable();
}
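The C++20 wording quoted above can be checked against a couple of standalone cases (not from the patch):

    #include <type_traits>

    struct A { A() = default; A(int); int i; };  // eligible default ctor is
                                                 // trivial; trivially copyable
    struct B { B() {} };                         // user-provided default ctor

    static_assert(std::is_trivial_v<A>, "trivial per C++20 [class]p6");
    static_assert(!std::is_trivial_v<B>, "non-trivial default constructor");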
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index dfed95f0513f..997398da7972 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -687,9 +687,13 @@ void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
if (ArchInfo == llvm::AArch64::INVALID)
return; // Not an architecture, nothing more to do.
+ // Disabling an architecture feature does not affect dependent features.
+ if (!Enabled)
+ return;
+
for (const auto *OtherArch : llvm::AArch64::ArchInfos)
if (ArchInfo.implies(*OtherArch))
- Features[OtherArch->getSubArch()] = Enabled;
+ Features[OtherArch->getSubArch()] = true;
// Set any features implied by the architecture
uint64_t Extensions =
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index f11751a76073..b85d5dc2d347 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -254,6 +254,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
+ bool IsFreeBSD = Triple.isOSFreeBSD();
bool IsOpenBSD = Triple.isOSOpenBSD();
bool IsNetBSD = Triple.isOSNetBSD();
@@ -321,7 +322,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
default:
if (IsNetBSD)
setABI("apcs-gnu");
- else if (IsOpenBSD)
+ else if (IsFreeBSD || IsOpenBSD)
setABI("aapcs-linux");
else
setABI("aapcs");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index 6951119e729c..c8197154fff7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -426,12 +426,12 @@ public:
} else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
DataLayout = "e-m:e-i64:64-n32:64";
ABI = "elfv2";
- } else if (Triple.isOSFreeBSD() && (Triple.getOSMajorVersion() == 0 || Triple.getOSMajorVersion() >= 13)) {
- DataLayout = "E-m:e-i64:64-n32:64";
- ABI = "elfv2";
} else {
DataLayout = "E-m:e-i64:64-n32:64";
- ABI = "elfv1";
+ if (Triple.isPPC64ELFv2ABI())
+ ABI = "elfv2";
+ else
+ ABI = "elfv1";
}
if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() || Triple.isMusl()) {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 25fda05da033..7c801657b6ac 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -197,8 +197,8 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ISAInfo->hasExtension("zve32x")) {
Builder.defineMacro("__riscv_vector");
- // Currently we support the v0.10 RISC-V V intrinsics.
- Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 10)));
+ // Currently we support the v0.11 RISC-V V intrinsics.
+ Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 11)));
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index cb3171227530..490e20ce4514 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -795,13 +795,13 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasHRESET)
Builder.defineMacro("__HRESET__");
if (HasAMXTILE)
- Builder.defineMacro("__AMXTILE__");
+ Builder.defineMacro("__AMX_TILE__");
if (HasAMXINT8)
- Builder.defineMacro("__AMXINT8__");
+ Builder.defineMacro("__AMX_INT8__");
if (HasAMXBF16)
- Builder.defineMacro("__AMXBF16__");
+ Builder.defineMacro("__AMX_BF16__");
if (HasAMXFP16)
- Builder.defineMacro("__AMXFP16__");
+ Builder.defineMacro("__AMX_FP16__");
if (HasCMPCCXADD)
Builder.defineMacro("__CMPCCXADD__");
if (HasRAOINT)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index a600768b2074..78646996eac2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -379,9 +379,8 @@ public:
/// zero if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
/// vbases being reordered.
- virtual const CXXRecordDecl *
- getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
- return MD->getParent();
+ virtual const CXXRecordDecl *getThisArgumentTypeForMethod(GlobalDecl GD) {
+ return cast<CXXMethodDecl>(GD.getDecl())->getParent();
}
/// Perform ABI-specific "this" argument adjustment required prior to
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index dfa552161d7c..ee5b76ab2120 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -322,7 +322,9 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(DeriveThisType(MD->getParent(), MD));
+
+ const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
+ argTypes.push_back(DeriveThisType(ThisType, MD));
bool PassParams = true;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index b889a4e05ee1..a9f3434589f2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -33,10 +33,12 @@ struct MemberCallInfo {
}
static MemberCallInfo
-commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
+commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
CallArgList &Args, CallArgList *RtlArgs) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
@@ -44,7 +46,7 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
// Push the this ptr.
const CXXRecordDecl *RD =
- CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+ CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
// If there is an implicit parameter (e.g. VTT), emit it.
@@ -110,7 +112,7 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
}
CallArgList Args;
- commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
+ commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
@@ -285,7 +287,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
CallArgList Args;
commonEmitCXXMemberOrOperatorCall(
- *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
+ *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
+ /*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index ae785cce09f9..52d442cc587f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -235,11 +235,24 @@ public:
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
- const CXXRecordDecl *
- getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
- if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
+ const CXXRecordDecl *getThisArgumentTypeForMethod(GlobalDecl GD) override {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ if (MD->isVirtual()) {
+ GlobalDecl LookupGD = GD;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ // Complete dtors take a pointer to the complete object,
+ // thus don't need adjustment.
+ if (GD.getDtorType() == Dtor_Complete)
+ return MD->getParent();
+
+ // There's only Dtor_Deleting in the vftable, but it shares the 'this'
+ // adjustment with the base one, so look up the deleting one instead.
+ LookupGD = GlobalDecl(DD, Dtor_Deleting);
+ }
MethodVFTableLocation ML =
- CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+
// The vbases might be ordered differently in the final overrider object
// and the complete object, so the "this" argument may sometimes point to
// memory that has no particular type (e.g. past the complete object).
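A source-level illustration (not part of the patch) of the situation described above: a hierarchy with a virtual base, where the complete-object destructor receives a pointer to the complete object while the destructor reached through the vftable (the deleting one) may need a 'this' adjustment.

    struct V { int v = 0; virtual ~V() = default; };
    struct B : virtual V { int b = 0; ~B() override = default; };
    struct D : B { int d = 0; ~D() override = default; };

    void destroy(B *p) { delete p; }  // under the Microsoft ABI this dispatches
                                      // through the deleting-destructor slot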
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index be1dbe8480c6..9e2d7a85d100 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -2381,9 +2381,7 @@ class X86_64ABIInfo : public ABIInfo {
return false;
const llvm::Triple &Triple = getTarget().getTriple();
- if (Triple.isOSDarwin() || Triple.isPS())
- return false;
- if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
+ if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
return false;
return true;
}
@@ -7418,18 +7416,28 @@ public:
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+ ASTContext &Ctx;
+
+ const SystemZABIInfo &getABIInfo() const {
+ return static_cast<const SystemZABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
// These are used for speeding up the search for a visible vector ABI.
mutable bool HasVisibleVecABIFlag = false;
mutable std::set<const Type *> SeenTypes;
- // Returns true (the first time) if Ty is or found to make use of a vector
- // type (e.g. as a function argument).
- bool isVectorTypeBased(const Type *Ty) const;
+ // Returns true (the first time) if Ty is, or is found to include, a vector
+ // type that exposes the vector ABI. This is any vector >=16 bytes, which
+ // with vector support is aligned to only 8 bytes. When IsParam is true,
+ // the type belongs to a value as passed between functions. If it is a
+ // vector <=16 bytes, it will be passed in a vector register (if supported).
+ bool isVectorTypeBased(const Type *Ty, bool IsParam) const;
public:
SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
: TargetCodeGenInfo(
- std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {
+ std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)),
+ Ctx(CGT.getContext()) {
SwiftInfo =
std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
}
@@ -7439,9 +7447,9 @@ public:
// indicating a visible vector ABI is added. Eventually this will result in
// a GNU attribute indicating the vector ABI of the module. Ty is the type
// of a variable or function parameter that is globally visible.
- void handleExternallyVisibleObjABI(const Type *Ty,
- CodeGen::CodeGenModule &M) const {
- if (!HasVisibleVecABIFlag && isVectorTypeBased(Ty)) {
+ void handleExternallyVisibleObjABI(const Type *Ty, CodeGen::CodeGenModule &M,
+ bool IsParam) const {
+ if (!HasVisibleVecABIFlag && isVectorTypeBased(Ty, IsParam)) {
M.getModule().addModuleFlag(llvm::Module::Warning,
"s390x-visible-vector-ABI", 1);
HasVisibleVecABIFlag = true;
@@ -7457,11 +7465,13 @@ public:
// variable or function.
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->isExternallyVisible())
- handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M);
+ handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M,
+ /*IsParam*/false);
}
else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isExternallyVisible())
- handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M);
+ handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M,
+ /*IsParam*/false);
}
}
@@ -7571,17 +7581,18 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases()) {
- QualType Base = I.getType();
+ if (CXXRD->hasDefinition())
+ for (const auto &I : CXXRD->bases()) {
+ QualType Base = I.getType();
- // Empty bases don't affect things either way.
- if (isEmptyRecord(getContext(), Base, true))
- continue;
+ // Empty bases don't affect things either way.
+ if (isEmptyRecord(getContext(), Base, true))
+ continue;
- if (!Found.isNull())
- return Ty;
- Found = GetSingleElementType(Base);
- }
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(Base);
+ }
// Check the fields.
for (const auto *FD : RD->fields()) {
@@ -7635,7 +7646,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
bool IsVector = false;
CharUnits UnpaddedSize;
CharUnits DirectAlign;
- SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM());
+ SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
if (IsIndirect) {
DirectTy = llvm::PointerType::getUnqual(DirectTy);
UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
@@ -7843,35 +7855,57 @@ void SystemZABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Check if a vararg vector argument is passed, in which case the
// vector ABI becomes visible as the va_list could be passed on to
// other functions.
- SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM());
+ SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
}
}
-bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty) const {
- while (Ty->isPointerType() || Ty->isArrayType())
- Ty = Ty->getPointeeOrArrayElementType();
+bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
+ bool IsParam) const {
if (!SeenTypes.insert(Ty).second)
return false;
- if (Ty->isVectorType())
- return true;
+
+ if (IsParam) {
+ // A narrow (<16 bytes) vector will, as a parameter, also expose the ABI,
+ // as it will be passed in a vector register. A wide (>16 bytes) vector
+ // will be passed via a "hidden" pointer, where no extra alignment is
+ // required (per GCC).
+ const Type *SingleEltTy =
+ getABIInfo().GetSingleElementType(QualType(Ty, 0)).getTypePtr();
+ bool SingleVecEltStruct = SingleEltTy != Ty && SingleEltTy->isVectorType() &&
+ Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
+ if (Ty->isVectorType() || SingleVecEltStruct)
+ return Ctx.getTypeSize(Ty) / 8 <= 16;
+ }
+
+ // Assume pointers are dereferenced.
+ while (Ty->isPointerType() || Ty->isArrayType())
+ Ty = Ty->getPointeeOrArrayElementType();
+
+ // Vectors >= 16 bytes expose the ABI through alignment requirements.
+ if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
+ return true;
+
if (const auto *RecordTy = Ty->getAs<RecordType>()) {
const RecordDecl *RD = RecordTy->getDecl();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
if (CXXRD->hasDefinition())
for (const auto &I : CXXRD->bases())
- if (isVectorTypeBased(I.getType().getTypePtr()))
+ if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/false))
return true;
for (const auto *FD : RD->fields())
- if (isVectorTypeBased(FD->getType().getTypePtr()))
+ if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/false))
return true;
}
+
if (const auto *FT = Ty->getAs<FunctionType>())
- if (isVectorTypeBased(FT->getReturnType().getTypePtr()))
+ if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/true))
return true;
if (const FunctionProtoType *Proto = Ty->getAs<FunctionProtoType>())
for (auto ParamType : Proto->getParamTypes())
- if (isVectorTypeBased(ParamType.getTypePtr()))
+ if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/true))
return true;
+
return false;
}
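For reference (not from the patch), the kinds of types the predicate above distinguishes, written with the GNU vector extension; the per-type comments restate the rules from the comments above:

    typedef int v4si __attribute__((vector_size(16)));  // 16 bytes: passed in a
                                                         // vector register
    typedef int v8si __attribute__((vector_size(32)));  // 32 bytes: passed via a
                                                         // hidden pointer, ABI
                                                         // visible via alignment
    struct Wrap { v4si Only; };  // single-element aggregate: treated like its
                                 // element when passed as a parameter

    void takesNarrow(v4si);      // parameter exposes the vector ABI
    void takesWide(v8si);        // so does this one, through alignment
    extern v8si GlobalVec;       // externally visible variable: also flags it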
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index 661d9977fbc5..a268f2fa8fc5 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -1873,14 +1873,12 @@ int Driver::ExecuteCompilation(
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
}
-#if LLVM_ON_UNIX
- // llvm/lib/Support/Unix/Signals.inc will exit with a special return code
+ // llvm/lib/Support/*/Signals.inc will exit with a special return code
// for SIGPIPE. Do not print diagnostics for this case.
if (CommandRes == EX_IOERR) {
Res = CommandRes;
continue;
}
-#endif
// Print extra information about abnormal failures, if possible.
//
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 52bee6a755ff..068a34a54a92 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <memory>
@@ -545,7 +546,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if ((Kinds & SanitizerKind::ShadowCallStack) &&
((TC.getTriple().isAArch64() &&
!llvm::AArch64::isX18ReservedByDefault(TC.getTriple())) ||
- TC.getTriple().isRISCV()) &&
+ (TC.getTriple().isRISCV() &&
+ !llvm::RISCV::isX18ReservedByDefault(TC.getTriple()))) &&
!Args.hasArg(options::OPT_ffixed_x18) && DiagnoseErrors) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index 6ec736bc701b..e3c025fb2468 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -34,53 +34,60 @@ static std::string getPPCGenericTargetCPU(const llvm::Triple &T) {
return "ppc";
}
-/// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting.
-std::string ppc::getPPCTargetCPU(const ArgList &Args, const llvm::Triple &T) {
- if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
- StringRef CPUName = A->getValue();
-
- // Clang/LLVM does not actually support code generation
- // for the 405 CPU. However, there are uses of this CPU ID
- // in projects that previously used GCC and rely on Clang
- // accepting it. Clang has always ignored it and passed the
- // generic CPU ID to the back end.
- if (CPUName == "generic" || CPUName == "405")
+static std::string normalizeCPUName(StringRef CPUName, const llvm::Triple &T) {
+ // Clang/LLVM does not actually support code generation
+ // for the 405 CPU. However, there are uses of this CPU ID
+ // in projects that previously used GCC and rely on Clang
+ // accepting it. Clang has always ignored it and passed the
+ // generic CPU ID to the back end.
+ if (CPUName == "generic" || CPUName == "405")
+ return getPPCGenericTargetCPU(T);
+
+ if (CPUName == "native") {
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ else
return getPPCGenericTargetCPU(T);
+ }
- if (CPUName == "native") {
- std::string CPU = std::string(llvm::sys::getHostCPUName());
- if (!CPU.empty() && CPU != "generic")
- return CPU;
- else
- return getPPCGenericTargetCPU(T);
- }
+ return llvm::StringSwitch<const char *>(CPUName)
+ .Case("common", "generic")
+ .Case("440fp", "440")
+ .Case("630", "pwr3")
+ .Case("G3", "g3")
+ .Case("G4", "g4")
+ .Case("G4+", "g4+")
+ .Case("8548", "e500")
+ .Case("G5", "g5")
+ .Case("power3", "pwr3")
+ .Case("power4", "pwr4")
+ .Case("power5", "pwr5")
+ .Case("power5x", "pwr5x")
+ .Case("power6", "pwr6")
+ .Case("power6x", "pwr6x")
+ .Case("power7", "pwr7")
+ .Case("power8", "pwr8")
+ .Case("power9", "pwr9")
+ .Case("power10", "pwr10")
+ .Case("future", "future")
+ .Case("powerpc", "ppc")
+ .Case("powerpc64", "ppc64")
+ .Case("powerpc64le", "ppc64le")
+ .Default(CPUName.data());
+}
- return llvm::StringSwitch<const char *>(CPUName)
- .Case("common", "generic")
- .Case("440fp", "440")
- .Case("630", "pwr3")
- .Case("G3", "g3")
- .Case("G4", "g4")
- .Case("G4+", "g4+")
- .Case("8548", "e500")
- .Case("G5", "g5")
- .Case("power3", "pwr3")
- .Case("power4", "pwr4")
- .Case("power5", "pwr5")
- .Case("power5x", "pwr5x")
- .Case("power6", "pwr6")
- .Case("power6x", "pwr6x")
- .Case("power7", "pwr7")
- .Case("power8", "pwr8")
- .Case("power9", "pwr9")
- .Case("power10", "pwr10")
- .Case("future", "future")
- .Case("powerpc", "ppc")
- .Case("powerpc64", "ppc64")
- .Case("powerpc64le", "ppc64le")
- .Default(CPUName.data());
- }
+/// Get the (LLVM) name of the PowerPC cpu we are tuning for.
+std::string ppc::getPPCTuneCPU(const ArgList &Args, const llvm::Triple &T) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ))
+ return normalizeCPUName(A->getValue(), T);
+ return getPPCGenericTargetCPU(T);
+}
+/// Get the (LLVM) name of the PowerPC cpu we are targeting.
+std::string ppc::getPPCTargetCPU(const ArgList &Args, const llvm::Triple &T) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ))
+ return normalizeCPUName(A->getValue(), T);
return getPPCGenericTargetCPU(T);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
index cd2b47d392b6..97ac45083852 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
@@ -37,6 +37,8 @@ FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
std::string getPPCTargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &T);
+std::string getPPCTuneCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &T);
const char *getPPCAsmModeForCPU(StringRef Name);
ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index 3d40e19c83a5..ec6860113b7e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -1989,22 +1989,19 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
void Clang::AddPPCTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ const llvm::Triple &T = getToolChain().getTriple();
if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
CmdArgs.push_back("-tune-cpu");
- if (strcmp(A->getValue(), "native") == 0)
- CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
- else
- CmdArgs.push_back(A->getValue());
+ std::string CPU = ppc::getPPCTuneCPU(Args, T);
+ CmdArgs.push_back(Args.MakeArgString(CPU));
}
// Select the ABI to use.
const char *ABIName = nullptr;
- const llvm::Triple &T = getToolChain().getTriple();
if (T.isOSBinFormatELF()) {
switch (getToolChain().getArch()) {
case llvm::Triple::ppc64: {
- if ((T.isOSFreeBSD() && T.getOSMajorVersion() >= 13) ||
- T.isOSOpenBSD() || T.isMusl())
+ if (T.isPPC64ELFv2ABI())
ABIName = "elfv2";
else
ABIName = "elfv1";
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 484c8c070264..aa125bb308e8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -443,24 +443,19 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--gpu-name");
CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
CmdArgs.push_back("--output-file");
- const char *OutputFileName = Args.MakeArgString(TC.getInputFilename(Output));
+ std::string OutputFileName = TC.getInputFilename(Output);
// If we are invoking `nvlink` internally we need to output a `.cubin` file.
- // Checking if the output is a temporary is the cleanest way to determine
- // this. Putting this logic in `getInputFilename` isn't an option because it
- // relies on the compilation.
// FIXME: This should hopefully be removed if NVIDIA updates their tooling.
- if (Output.isFilename() &&
- llvm::find(C.getTempFiles(), Output.getFilename()) !=
- C.getTempFiles().end()) {
+ if (!C.getInputArgs().getLastArg(options::OPT_c)) {
SmallString<256> Filename(Output.getFilename());
llvm::sys::path::replace_extension(Filename, "cubin");
- OutputFileName = Args.MakeArgString(Filename);
+ OutputFileName = Filename.str();
}
if (Output.isFilename() && OutputFileName != Output.getFilename())
- C.addTempFile(OutputFileName);
+ C.addTempFile(Args.MakeArgString(OutputFileName));
- CmdArgs.push_back(OutputFileName);
+ CmdArgs.push_back(Args.MakeArgString(OutputFileName));
for (const auto &II : Inputs)
CmdArgs.push_back(Args.MakeArgString(II.getFilename()));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 64935227b07e..2230295ccd74 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -85,16 +85,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-mfpu=softvfp");
- switch (getToolChain().getTriple().getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::EABI:
- CmdArgs.push_back("-meabi=5");
- break;
-
- default:
- CmdArgs.push_back("-matpcs");
- }
+ CmdArgs.push_back("-meabi=5");
break;
}
case llvm::Triple::sparc:
@@ -176,10 +167,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("/libexec/ld-elf.so.1");
}
const llvm::Triple &T = ToolChain.getTriple();
- if (T.getOSMajorVersion() >= 9) {
- if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc || T.isX86())
- CmdArgs.push_back("--hash-style=both");
- }
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc || T.isX86())
+ CmdArgs.push_back("--hash-style=both");
CmdArgs.push_back("--enable-new-dtags");
}
@@ -396,17 +385,11 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
}
-ToolChain::CXXStdlibType FreeBSD::GetDefaultCXXStdlibType() const {
- unsigned Major = getTriple().getOSMajorVersion();
- if (Major >= 10 || Major == 0)
- return ToolChain::CST_Libcxx;
- return ToolChain::CST_Libstdcxx;
-}
-
unsigned FreeBSD::GetDefaultDwarfVersion() const {
- if (getTriple().getOSMajorVersion() < 12)
- return 2;
- return 4;
+ unsigned Major = getTriple().getOSMajorVersion();
+ if (Major >= 12 || Major == 0)
+ return 4;
+ return 2;
}
void FreeBSD::AddClangSystemIncludeArgs(
@@ -449,30 +432,14 @@ void FreeBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
concat(getDriver().SysRoot, "/usr/include/c++/v1"));
}
-void FreeBSD::addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(concat(getDriver().SysRoot, "/usr/include/c++/4.2"),
- "", "", DriverArgs, CC1Args);
-}
-
void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- CXXStdlibType Type = GetCXXStdlibType(Args);
unsigned Major = getTriple().getOSMajorVersion();
bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
- switch (Type) {
- case ToolChain::CST_Libcxx:
- CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
- if (Args.hasArg(options::OPT_fexperimental_library))
- CmdArgs.push_back("-lc++experimental");
- break;
-
- case ToolChain::CST_Libstdcxx:
- CmdArgs.push_back(Profiling ? "-lstdc++_p" : "-lstdc++");
- break;
- }
+ CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
}
void FreeBSD::AddCudaIncludeArgs(const ArgList &DriverArgs,
@@ -491,21 +458,6 @@ Tool *FreeBSD::buildAssembler() const {
Tool *FreeBSD::buildLinker() const { return new tools::freebsd::Linker(*this); }
-llvm::ExceptionHandling FreeBSD::GetExceptionModel(const ArgList &Args) const {
- // FreeBSD uses SjLj exceptions on ARM oabi.
- switch (getTriple().getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::EABI:
- return llvm::ExceptionHandling::None;
- default:
- if (getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::thumb)
- return llvm::ExceptionHandling::SjLj;
- return llvm::ExceptionHandling::None;
- }
-}
-
bool FreeBSD::HasNativeLLVMSupport() const { return true; }
ToolChain::UnwindTableLevel
@@ -550,8 +502,9 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
void FreeBSD::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
+ unsigned Major = getTriple().getOSMajorVersion();
if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array,
- getTriple().getOSMajorVersion() >= 12))
+ (Major >= 12 || Major == 0)))
CC1Args.push_back("-fno-use-init-array");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
index 18832dad9884..cec67d84a2ce 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
@@ -61,12 +61,16 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- CXXStdlibType GetDefaultCXXStdlibType() const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+
void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void
- addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -74,8 +78,6 @@ public:
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- llvm::ExceptionHandling
- GetExceptionModel(const llvm::opt::ArgList &Args) const override;
UnwindTableLevel
getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 9bdc3a791779..ea6d7d697770 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -447,13 +447,13 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const {
SanitizerMask Res;
switch (getTriple().getArch()) {
case llvm::Triple::aarch64:
+ case llvm::Triple::riscv64:
Res |= SanitizerKind::ShadowCallStack;
break;
case llvm::Triple::x86_64:
Res |= SanitizerKind::SafeStack;
break;
default:
- // TODO: Enable SafeStack on RISC-V once tested.
break;
}
return Res;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index 908484fcc0b8..bac486bab885 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -200,6 +200,16 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
Args.AddLastArg(CmdArgs, options::OPT_Z_Flag);
+ // Add asan_dynamic as the first import lib before other libs. This allows
+ // asan to be initialized as early as possible to increase its instrumentation
+ // coverage to include other user DLLs which have not been built with asan.
+ if (Sanitize.needsAsanRt() && !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // MinGW always links against a shared MSVCRT.
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic", ToolChain::FT_Shared));
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_mdll)) {
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("dllcrt2.o")));
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index f37c3f983635..0d3fde90ab38 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -348,8 +348,11 @@ struct ScalarEnumerationTraits<FormatStyle::IndentExternBlockStyle> {
template <> struct MappingTraits<FormatStyle::IntegerLiteralSeparatorStyle> {
static void mapping(IO &IO, FormatStyle::IntegerLiteralSeparatorStyle &Base) {
IO.mapOptional("Binary", Base.Binary);
+ IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigits);
IO.mapOptional("Decimal", Base.Decimal);
+ IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigits);
IO.mapOptional("Hex", Base.Hex);
+ IO.mapOptional("HexMinDigits", Base.HexMinDigits);
}
};
@@ -1392,7 +1395,10 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.InsertBraces = false;
LLVMStyle.InsertNewlineAtEOF = false;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
- LLVMStyle.IntegerLiteralSeparator = {/*Binary=*/0, /*Decimal=*/0, /*Hex=*/0};
+ LLVMStyle.IntegerLiteralSeparator = {
+ /*Binary=*/0, /*BinaryMinDigits=*/0,
+ /*Decimal=*/0, /*DecimalMinDigits=*/0,
+ /*Hex=*/0, /*HexMinDigits=*/0};
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
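To make the new knobs concrete (illustration only; the option names come from the mapping above, the values are arbitrary): with a style along the lines of IntegerLiteralSeparator: { Decimal: 3, DecimalMinDigits: 5 }, literals below the minimum digit count are left untouched and longer ones are grouped.

    int visible = 1234;    // 4 digits  < DecimalMinDigits: no separators added
    int grouped = 12'345;  // 5 digits >= DecimalMinDigits: grouped by 3
    int hexAsIs = 0xFFFF;  // Hex is 0 in this example style: left alone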
diff --git a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
index ef07ab06760b..44034e44adec 100644
--- a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -69,6 +69,12 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
if (SkipBinary && SkipDecimal && SkipHex)
return {};
+ const auto BinaryMinDigits =
+ std::max((int)Option.BinaryMinDigits, Binary + 1);
+ const auto DecimalMinDigits =
+ std::max((int)Option.DecimalMinDigits, Decimal + 1);
+ const auto HexMinDigits = std::max((int)Option.HexMinDigits, Hex + 1);
+
const auto &SourceMgr = Env.getSourceManager();
AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges());
@@ -106,17 +112,18 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
(IsBase16 && SkipHex) || B == Base::Other) {
continue;
}
+ if (Style.isCpp()) {
+ if (const auto Pos = Text.find_first_of("_i"); Pos != StringRef::npos) {
+ Text = Text.substr(0, Pos);
+ Length = Pos;
+ }
+ }
if ((IsBase10 && Text.find_last_of(".eEfFdDmM") != StringRef::npos) ||
(IsBase16 && Text.find_last_of(".pP") != StringRef::npos)) {
continue;
}
- if (((IsBase2 && Binary < 0) || (IsBase10 && Decimal < 0) ||
- (IsBase16 && Hex < 0)) &&
- Text.find(Separator) == StringRef::npos) {
- continue;
- }
const auto Start = Text[0] == '0' ? 2 : 0;
- auto End = Text.find_first_of("uUlLzZn");
+ auto End = Text.find_first_of("uUlLzZn", Start);
if (End == StringRef::npos)
End = Length;
if (Start > 0 || End < Length) {
@@ -124,16 +131,30 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env,
Text = Text.substr(Start, Length);
}
auto DigitsPerGroup = Decimal;
- if (IsBase2)
+ auto MinDigits = DecimalMinDigits;
+ if (IsBase2) {
DigitsPerGroup = Binary;
- else if (IsBase16)
+ MinDigits = BinaryMinDigits;
+ } else if (IsBase16) {
DigitsPerGroup = Hex;
- if (DigitsPerGroup > 0 && checkSeparator(Text, DigitsPerGroup))
+ MinDigits = HexMinDigits;
+ }
+ const auto SeparatorCount = Text.count(Separator);
+ const int DigitCount = Length - SeparatorCount;
+ const bool RemoveSeparator = DigitsPerGroup < 0 || DigitCount < MinDigits;
+ if (RemoveSeparator && SeparatorCount == 0)
continue;
+ if (!RemoveSeparator && SeparatorCount > 0 &&
+ checkSeparator(Text, DigitsPerGroup)) {
+ continue;
+ }
+ const auto &Formatted =
+ format(Text, DigitsPerGroup, DigitCount, RemoveSeparator);
+ assert(Formatted != Text);
if (Start > 0)
Location = Location.getLocWithOffset(Start);
- cantFail(Result.add(tooling::Replacement(SourceMgr, Location, Length,
- format(Text, DigitsPerGroup))));
+ cantFail(Result.add(
+ tooling::Replacement(SourceMgr, Location, Length, Formatted)));
}
return {Result, 0};
@@ -150,9 +171,9 @@ bool IntegerLiteralSeparatorFixer::checkSeparator(
return false;
I = 0;
} else {
- ++I;
if (I == DigitsPerGroup)
return false;
+ ++I;
}
}
@@ -160,23 +181,20 @@ bool IntegerLiteralSeparatorFixer::checkSeparator(
}
std::string IntegerLiteralSeparatorFixer::format(const StringRef IntegerLiteral,
- int DigitsPerGroup) const {
+ int DigitsPerGroup,
+ int DigitCount,
+ bool RemoveSeparator) const {
assert(DigitsPerGroup != 0);
std::string Formatted;
- if (DigitsPerGroup < 0) {
+ if (RemoveSeparator) {
for (auto C : IntegerLiteral)
if (C != Separator)
Formatted.push_back(C);
return Formatted;
}
- int DigitCount = 0;
- for (auto C : IntegerLiteral)
- if (C != Separator)
- ++DigitCount;
-
int Remainder = DigitCount % DigitsPerGroup;
int I = 0;
diff --git a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h
index 156bf5c14fca..2c158e4473bf 100644
--- a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h
+++ b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h
@@ -27,7 +27,8 @@ public:
private:
bool checkSeparator(const StringRef IntegerLiteral, int DigitsPerGroup) const;
- std::string format(const StringRef IntegerLiteral, int DigitsPerGroup) const;
+ std::string format(const StringRef IntegerLiteral, int DigitsPerGroup,
+ int DigitCount, bool RemoveSeparator) const;
char Separator;
};
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
index cef8b36ff758..609b412380f8 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -280,8 +280,11 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
// The case `const Foo &&` -> `Foo const &&`
// The case `const std::Foo &&` -> `std::Foo const &&`
// The case `const std::Foo<T> &&` -> `std::Foo<T> const &&`
- while (Next && Next->isOneOf(tok::identifier, tok::coloncolon))
+ // However, `const Bar::*` remains the same.
+ while (Next && Next->isOneOf(tok::identifier, tok::coloncolon) &&
+ !Next->startsSequence(tok::coloncolon, tok::star)) {
Next = Next->Next;
+ }
if (Next && Next->is(TT_TemplateOpener)) {
Next = Next->MatchingParen;
// Move to the end of any template class members e.g.
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 49c30ca78deb..ca651eaa9440 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -3833,6 +3833,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
if (Style.isCpp()) {
+ // Space between UDL and dot: auto b = 4s .count();
+ if (Right.is(tok::period) && Left.is(tok::numeric_constant))
+ return true;
// Space between import <iostream>.
// or import .....;
if (Left.is(Keywords.kw_import) && Right.isOneOf(tok::less, tok::ellipsis))
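The new UDL rule matters because of maximal munch: without the space, the literal suffix and the member name would lex as one preprocessing number. A standalone example (not from the patch):

    #include <chrono>
    using namespace std::chrono_literals;

    auto secs = 4s .count();  // the space before '.' must be preserved;
                              // "4s.count" would be a single pp-number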
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 3e58b6f90559..7a49b189b481 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -2739,16 +2739,17 @@ void UnwrappedLineParser::handleAttributes() {
// Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
if (FormatTok->is(TT_AttributeMacro))
nextToken();
- handleCppAttributes();
+ if (FormatTok->is(tok::l_square))
+ handleCppAttributes();
}
bool UnwrappedLineParser::handleCppAttributes() {
// Handle [[likely]] / [[unlikely]] attributes.
- if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute()) {
- parseSquare();
- return true;
- }
- return false;
+ assert(FormatTok->is(tok::l_square));
+ if (!tryToParseSimpleAttribute())
+ return false;
+ parseSquare();
+ return true;
}
/// Returns whether \c Tok begins a block.
@@ -3855,7 +3856,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// An [[attribute]] can be before the identifier.
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas, tok::l_square, tok::r_square) ||
+ tok::kw_alignas, tok::l_square) ||
((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->isOneOf(tok::period, tok::comma))) {
if (Style.isJavaScript() &&
@@ -3869,21 +3870,15 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
continue;
}
}
+ if (FormatTok->is(tok::l_square) && handleCppAttributes())
+ continue;
bool IsNonMacroIdentifier =
FormatTok->is(tok::identifier) &&
FormatTok->TokenText != FormatTok->TokenText.upper();
nextToken();
- // We can have macros or attributes in between 'class' and the class name.
- if (!IsNonMacroIdentifier) {
- if (FormatTok->is(tok::l_paren)) {
- parseParens();
- } else if (FormatTok->is(TT_AttributeSquare)) {
- parseSquare();
- // Consume the closing TT_AttributeSquare.
- if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
- nextToken();
- }
- }
+ // We can have macros in between 'class' and the class name.
+ if (!IsNonMacroIdentifier && FormatTok->is(tok::l_paren))
+ parseParens();
}
// Note that parsing away template declarations here leads to incorrectly
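The parseRecord() change above is about inputs of this shape (illustration only):

    class [[nodiscard]] Widget {  // attribute between the class key and name
    public:
      int id() const { return 42; }
    };

    #define EXPORT_API            // macros may also appear in that position
    class EXPORT_API Gadget {};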
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index ecc1c4cf51c1..ac9f8f8ed51c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -1983,14 +1983,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Module = PP->getHeaderSearchInfo().lookupModule(
ModuleName, ImportLoc, /*AllowSearch*/ true,
/*AllowExtraModuleMapSearch*/ !IsInclusionDirective);
- /// FIXME: perhaps we should (a) look for a module using the module name
- // to file map (PrebuiltModuleFiles) and (b) diagnose if still not found?
- //if (Module == nullptr) {
- // getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
- // << ModuleName;
- // DisableGeneratingGlobalModuleIndex = true;
- // return ModuleLoadResult();
- //}
+
MM.cacheModuleLoad(*Path[0].first, Module);
} else {
ModuleLoadResult Result = findOrCompileModuleAndReadAST(
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index 6967b46fdb24..0d2e8be6e486 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -524,7 +524,7 @@ _storebe_i64(void * __P, long long __D) {
#include <invpcidintrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMXFP16__)
+ defined(__AMX_FP16__)
#include <amxfp16intrin.h>
#endif
@@ -534,7 +534,7 @@ _storebe_i64(void * __P, long long __D) {
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+ defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__)
#include <amxintrin.h>
#endif
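User code that keyed off the old spellings may want to accept both names across compiler releases; a hedged example (the HAVE_AMX_TILE macro is hypothetical):

    #include <immintrin.h>

    #if defined(__AMX_TILE__) || defined(__AMXTILE__)
    #  define HAVE_AMX_TILE 1   // old or new predefine: AMX tile intrinsics exist
    #endif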
diff --git a/contrib/llvm-project/clang/lib/Headers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
index 2111c24f31a6..16d8855a1c0b 100644
--- a/contrib/llvm-project/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
@@ -1145,7 +1145,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
/// A 128-bit integer vector containing the bits to be tested.
/// \returns TRUE if the bits specified in the operand are all set to 1; FALSE
/// otherwise.
-#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_set1_epi32(-1))
/// Tests whether the specified bits in a 128-bit integer vector are
/// neither all zeros nor all ones.
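A small usage example (not from the patch); one effect of the new definition visible in the diff is that the argument is referenced only once and the all-ones mask no longer has to be derived from the input itself:

    #include <smmintrin.h>

    int check(__m128i v) {
      return _mm_test_all_ones(v);  // 1 iff every bit of v is set
    }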
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index bbc271e5611e..76d0d53ed31d 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1637,35 +1637,14 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Tok.setKind(tok::string_literal);
} else if (II == Ident__FLT_EVAL_METHOD__) {
// __FLT_EVAL_METHOD__ is set to the default value.
- if (getTUFPEvalMethod() ==
- LangOptions::FPEvalMethodKind::FEM_Indeterminable) {
- // This is possible if `AllowFPReassoc` or `AllowReciprocal` is enabled.
- // These modes can be triggered via the command line option `-ffast-math`
- // or via a `pragma float_control`.
- // __FLT_EVAL_METHOD__ expands to -1.
- // The `minus` operator is the next token we read from the stream.
- auto Toks = std::make_unique<Token[]>(1);
- OS << "-";
- Tok.setKind(tok::minus);
- // Push the token `1` to the stream.
- Token NumberToken;
- NumberToken.startToken();
- NumberToken.setKind(tok::numeric_constant);
- NumberToken.setLiteralData("1");
- NumberToken.setLength(1);
- Toks[0] = NumberToken;
- EnterTokenStream(std::move(Toks), 1, /*DisableMacroExpansion*/ false,
- /*IsReinject*/ false);
- } else {
- OS << getTUFPEvalMethod();
- // __FLT_EVAL_METHOD__ expands to a simple numeric value.
- Tok.setKind(tok::numeric_constant);
- if (getLastFPEvalPragmaLocation().isValid()) {
- // The program is ill-formed. The value of __FLT_EVAL_METHOD__ is
- // altered by the pragma.
- Diag(Tok, diag::err_illegal_use_of_flt_eval_macro);
- Diag(getLastFPEvalPragmaLocation(), diag::note_pragma_entered_here);
- }
+ OS << getTUFPEvalMethod();
+ // __FLT_EVAL_METHOD__ expands to a simple numeric value.
+ Tok.setKind(tok::numeric_constant);
+ if (getLastFPEvalPragmaLocation().isValid()) {
+ // The program is ill-formed. The value of __FLT_EVAL_METHOD__ is altered
+ // by the pragma.
+ Diag(Tok, diag::err_illegal_use_of_flt_eval_macro);
+ Diag(getLastFPEvalPragmaLocation(), diag::note_pragma_entered_here);
}
} else if (II == Ident__COUNTER__) {
// __COUNTER__ expands to a simple numeric value.
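With the removed branch gone (and the matching Preprocessor.cpp change further down), the macro always expands to a single numeric token; a trivial probe (not from the patch):

    // Previously, under -ffast-math, this could expand to the two tokens
    // '-' '1'; it now expands to the TU-level value (e.g. 0 on x86-64 SSE).
    constexpr int FltEvalMethod = __FLT_EVAL_METHOD__;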
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index fe9adb5685e3..0d411abf8f1c 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -207,11 +207,6 @@ void Preprocessor::Initialize(const TargetInfo &Target,
else
// Set initial value of __FLT_EVAL_METHOD__ from the command line.
setCurrentFPEvalMethod(SourceLocation(), getLangOpts().getFPEvalMethod());
- // When `-ffast-math` option is enabled, it triggers several driver math
- // options to be enabled. Among those, only one of the following two modes
- // affect the eval-method: reciprocal or reassociate.
- if (getLangOpts().AllowFPReassoc || getLangOpts().AllowRecip)
- setCurrentFPEvalMethod(SourceLocation(), LangOptions::FEM_Indeterminable);
}
void Preprocessor::InitializeForModelFile() {
@@ -873,6 +868,7 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
CurLexerKind != CLK_CachingLexer) {
ModuleImportLoc = Identifier.getLocation();
NamedModuleImportPath.clear();
+ IsAtImport = true;
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
}
@@ -940,6 +936,7 @@ void Preprocessor::Lex(Token &Result) {
case tok::semi:
TrackGMFState.handleSemi();
StdCXXImportSeqState.handleSemi();
+ ModuleDeclState.handleSemi();
break;
case tok::header_name:
case tok::annot_header_unit:
@@ -948,6 +945,13 @@ void Preprocessor::Lex(Token &Result) {
case tok::kw_export:
TrackGMFState.handleExport();
StdCXXImportSeqState.handleExport();
+ ModuleDeclState.handleExport();
+ break;
+ case tok::colon:
+ ModuleDeclState.handleColon();
+ break;
+ case tok::period:
+ ModuleDeclState.handlePeriod();
break;
case tok::identifier:
if (Result.getIdentifierInfo()->isModulesImport()) {
@@ -956,18 +960,25 @@ void Preprocessor::Lex(Token &Result) {
if (StdCXXImportSeqState.afterImportSeq()) {
ModuleImportLoc = Result.getLocation();
NamedModuleImportPath.clear();
+ IsAtImport = false;
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
}
break;
} else if (Result.getIdentifierInfo() == getIdentifierInfo("module")) {
TrackGMFState.handleModule(StdCXXImportSeqState.afterTopLevelSeq());
+ ModuleDeclState.handleModule();
break;
+ } else {
+ ModuleDeclState.handleIdentifier(Result.getIdentifierInfo());
+ if (ModuleDeclState.isModuleCandidate())
+ break;
}
[[fallthrough]];
default:
TrackGMFState.handleMisc();
StdCXXImportSeqState.handleMisc();
+ ModuleDeclState.handleMisc();
break;
}
}
@@ -1151,6 +1162,15 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
if (NamedModuleImportPath.empty() && getLangOpts().CPlusPlusModules) {
if (LexHeaderName(Result))
return true;
+
+ if (Result.is(tok::colon) && ModuleDeclState.isNamedModule()) {
+ std::string Name = ModuleDeclState.getPrimaryName().str();
+ Name += ":";
+ NamedModuleImportPath.push_back(
+ {getIdentifierInfo(Name), Result.getLocation()});
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return true;
+ }
} else {
Lex(Result);
}
@@ -1164,9 +1184,10 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
/*DisableMacroExpansion*/ true, /*IsReinject*/ false);
};
+ bool ImportingHeader = Result.is(tok::header_name);
// Check for a header-name.
SmallVector<Token, 32> Suffix;
- if (Result.is(tok::header_name)) {
+ if (ImportingHeader) {
// Enter the header-name token into the token stream; a Lex action cannot
// both return a token and cache tokens (doing so would corrupt the token
// cache if the call to Lex comes from CachingLex / PeekAhead).
@@ -1244,8 +1265,8 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
// We expected to see an identifier here, and we did; continue handling
// identifiers.
- NamedModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
- Result.getLocation()));
+ NamedModuleImportPath.push_back(
+ std::make_pair(Result.getIdentifierInfo(), Result.getLocation()));
ModuleImportExpectsIdentifier = false;
CurLexerKind = CLK_LexAfterModuleImport;
return true;
@@ -1285,7 +1306,8 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
std::string FlatModuleName;
if (getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) {
for (auto &Piece : NamedModuleImportPath) {
- if (!FlatModuleName.empty())
+ // If the FlatModuleName ends with a colon, it implies it is a partition.
+ if (!FlatModuleName.empty() && FlatModuleName.back() != ':')
FlatModuleName += ".";
FlatModuleName += Piece.first->getName();
}
@@ -1296,7 +1318,8 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
}
Module *Imported = nullptr;
- if (getLangOpts().Modules) {
+ // We don't/shouldn't load standard C++20 modules when preprocessing.
+ if (getLangOpts().Modules && !isInImportingCXXNamedModules()) {
Imported = TheModuleLoader.loadModule(ModuleImportLoc,
NamedModuleImportPath,
Module::Hidden,
@@ -1304,6 +1327,7 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
if (Imported)
makeModuleVisible(Imported, SemiLoc);
}
+
if (Callbacks)
Callbacks->moduleImport(ModuleImportLoc, NamedModuleImportPath, Imported);
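The state being tracked above corresponds to source like the following (illustrative; the partition is assumed to be provided elsewhere in the build), which is how `import :impl;` gets flattened to "foo:impl":

    // foo.cppm -- primary module interface unit
    export module foo;   // ModuleDeclState records the primary name "foo"
    import :impl;        // partition import: the flattened name becomes
                         // "foo:impl" and, per the comment above, it is not
                         // handed to the clang-modules loader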
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
index c6968b9f417e..ebe7dd66c118 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
@@ -1020,8 +1020,16 @@ static void updateConsecutiveMacroArgTokens(SourceManager &SM,
SourceLocation Limit =
SM.getComposedLoc(BeginFID, SM.getFileIDSize(BeginFID));
Partition = All.take_while([&](const Token &T) {
- return T.getLocation() >= BeginLoc && T.getLocation() < Limit &&
- NearLast(T.getLocation());
+ // NOTE: the Limit is included! The lexer recovery only ever inserts a
+ // single token past the end of the FileID, specifically the ) when a
+ // macro-arg containing a comma should be guarded by parentheses.
+ //
+ // It is safe to include the Limit here because SourceManager allocates
+ // FileSize + 1 for each SLocEntry.
+ //
+ // See https://github.com/llvm/llvm-project/issues/60722.
+ return T.getLocation() >= BeginLoc && T.getLocation() <= Limit &&
+ NearLast(T.getLocation());
});
}
assert(!Partition.empty());
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index c7fd1156928c..e6812ac72c88 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -4965,7 +4965,6 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
bool IsDependent = false;
const char *PrevSpec = nullptr;
unsigned DiagID;
- UsingShadowDecl* FoundUsing = nullptr;
Decl *TagDecl =
Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK, StartLoc, SS,
Name, NameLoc, attrs, AS, DS.getModulePrivateSpecLoc(),
@@ -4974,7 +4973,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
BaseType, DSC == DeclSpecContext::DSC_type_specifier,
DSC == DeclSpecContext::DSC_template_param ||
DSC == DeclSpecContext::DSC_template_type_arg,
- OffsetOfState, FoundUsing, &SkipBody).get();
+ OffsetOfState, &SkipBody).get();
if (SkipBody.ShouldSkip) {
assert(TUK == Sema::TUK_Definition && "can only skip a definition");
@@ -4984,8 +4983,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
T.skipToEnd();
if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
- NameLoc.isValid() ? NameLoc : StartLoc, PrevSpec,
- DiagID, FoundUsing ? FoundUsing : TagDecl, Owned,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagDecl, Owned,
Actions.getASTContext().getPrintingPolicy()))
Diag(StartLoc, DiagID) << PrevSpec;
return;
@@ -5039,8 +5038,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
}
if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
- NameLoc.isValid() ? NameLoc : StartLoc, PrevSpec,
- DiagID, FoundUsing ? FoundUsing : TagDecl, Owned,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagDecl, Owned,
Actions.getASTContext().getPrintingPolicy()))
Diag(StartLoc, DiagID) << PrevSpec;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index b26ec00cfedf..227c1df2bddd 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -1934,7 +1934,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Create the tag portion of the class or class template.
DeclResult TagOrTempResult = true; // invalid
TypeResult TypeResult = true; // invalid
- UsingShadowDecl *FoundUsing = nullptr;
bool Owned = false;
Sema::SkipBodyInfo SkipBody;
@@ -2075,7 +2074,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DSC == DeclSpecContext::DSC_type_specifier,
DSC == DeclSpecContext::DSC_template_param ||
DSC == DeclSpecContext::DSC_template_type_arg,
- OffsetOfState, FoundUsing, &SkipBody);
+ OffsetOfState, &SkipBody);
// If ActOnTag said the type was dependent, try again with the
// less common call.
@@ -2134,7 +2133,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (!TagOrTempResult.isInvalid()) {
Result = DS.SetTypeSpecType(
TagType, StartLoc, NameLoc.isValid() ? NameLoc : StartLoc, PrevSpec,
- DiagID, FoundUsing ? FoundUsing : TagOrTempResult.get(), Owned, Policy);
+ DiagID, TagOrTempResult.get(), Owned, Policy);
} else {
DS.SetTypeSpecError();
return;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index 392ed29467a9..66d937ac5742 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -2629,12 +2629,6 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
- enum class Kind { MemberAccess, ArraySubscript };
- auto DiagExt = [&](SourceLocation Loc, Kind K) {
- Diag(Loc, diag::ext_offsetof_member_designator)
- << (K == Kind::ArraySubscript) << (OOK == Sema::OOK_Macro);
- };
-
// FIXME: This loop leaks the index expressions on error.
while (true) {
if (Tok.is(tok::period)) {
@@ -2648,7 +2642,6 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
}
- DiagExt(Comps.back().LocStart, Kind::MemberAccess);
Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
Comps.back().LocEnd = ConsumeToken();
} else if (Tok.is(tok::l_square)) {
@@ -2666,7 +2659,6 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
SkipUntil(tok::r_paren, StopAtSemi);
return Res;
}
- DiagExt(Comps.back().LocStart, Kind::ArraySubscript);
Comps.back().U.E = Res.get();
ST.consumeClose();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index 42f582724564..3ea97f6aa8f2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -565,13 +565,6 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
case PFC_Precise:
NewFPFeatures.setFPPreciseEnabled(true);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
- if (PP.getCurrentFPEvalMethod() ==
- LangOptions::FPEvalMethodKind::FEM_Indeterminable &&
- PP.getLastFPEvalPragmaLocation().isValid())
- // A preceding `pragma float_control(precise,off)` has changed
- // the value of the evaluation method.
- // Set it back to its old value.
- PP.setCurrentFPEvalMethod(SourceLocation(), PP.getLastFPEvalMethod());
break;
case PFC_NoPrecise:
if (CurFPFeatures.getExceptionMode() == LangOptions::FPE_Strict)
@@ -581,10 +574,6 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
else
NewFPFeatures.setFPPreciseEnabled(false);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
- PP.setLastFPEvalMethod(PP.getCurrentFPEvalMethod());
- // `AllowFPReassoc` or `AllowReciprocal` option is enabled.
- PP.setCurrentFPEvalMethod(
- Loc, LangOptions::FPEvalMethodKind::FEM_Indeterminable);
break;
case PFC_Except:
if (!isPreciseFPEnabled())
@@ -608,12 +597,6 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
}
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
NewFPFeatures = FpPragmaStack.CurrentValue;
- if (CurFPFeatures.getAllowFPReassociate() ||
- CurFPFeatures.getAllowReciprocal())
- // Since we are popping the pragma, we don't want to be passing
- // a location here.
- PP.setCurrentFPEvalMethod(SourceLocation(),
- CurFPFeatures.getFPEvalMethod());
break;
}
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index 4d4b2482d046..a92bbde113fc 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -150,11 +150,19 @@ bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
namespace {
struct SatisfactionStackRAII {
Sema &SemaRef;
- SatisfactionStackRAII(Sema &SemaRef, llvm::FoldingSetNodeID FSNID)
+ bool Inserted = false;
+ SatisfactionStackRAII(Sema &SemaRef, const NamedDecl *ND,
+ llvm::FoldingSetNodeID FSNID)
: SemaRef(SemaRef) {
- SemaRef.PushSatisfactionStackEntry(FSNID);
+ if (ND) {
+ SemaRef.PushSatisfactionStackEntry(ND, FSNID);
+ Inserted = true;
+ }
+ }
+ ~SatisfactionStackRAII() {
+ if (Inserted)
+ SemaRef.PopSatisfactionStackEntry();
}
- ~SatisfactionStackRAII() { SemaRef.PopSatisfactionStackEntry(); }
};
} // namespace
@@ -273,7 +281,8 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
}
static bool
-DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID, const Expr *E,
+DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID,
+ const NamedDecl *Templ, const Expr *E,
const MultiLevelTemplateArgumentList &MLTAL) {
E->Profile(ID, S.Context, /*Canonical=*/true);
for (const auto &List : MLTAL)
@@ -286,7 +295,7 @@ DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID, const Expr *E,
// expression, or when trying to determine the constexpr-ness of special
// members. Otherwise we could just use the
// Sema::InstantiatingTemplate::isAlreadyBeingInstantiated function.
- if (S.SatisfactionStackContains(ID)) {
+ if (S.SatisfactionStackContains(Templ, ID)) {
S.Diag(E->getExprLoc(), diag::err_constraint_depends_on_self)
<< const_cast<Expr *>(E) << E->getSourceRange();
return true;
@@ -317,13 +326,14 @@ static ExprResult calculateConstraintSatisfaction(
return ExprError();
llvm::FoldingSetNodeID ID;
- if (DiagRecursiveConstraintEval(S, ID, AtomicExpr, MLTAL)) {
+ if (Template &&
+ DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, MLTAL)) {
Satisfaction.IsSatisfied = false;
Satisfaction.ContainsErrors = true;
return ExprEmpty();
}
- SatisfactionStackRAII StackRAII(S, ID);
+ SatisfactionStackRAII StackRAII(S, Template, ID);
// We do not want error diagnostics escaping here.
Sema::SFINAETrap Trap(S);
@@ -1132,8 +1142,7 @@ substituteParameterMappings(Sema &S, NormalizedConstraint &N,
Sema::InstantiatingTemplate Inst(
S, ArgsAsWritten->arguments().front().getSourceRange().getBegin(),
Sema::InstantiatingTemplate::ParameterMappingSubstitution{}, Concept,
- SourceRange(ArgsAsWritten->arguments()[0].getSourceRange().getBegin(),
- ArgsAsWritten->arguments().back().getSourceRange().getEnd()));
+ ArgsAsWritten->arguments().front().getSourceRange());
if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
return true;
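Note: the RAII change above only pops a satisfaction-stack entry when one was actually pushed. A generic sketch of that conditional-RAII pattern (names are illustrative, not Clang's):

    #include <vector>

    struct Stack {
      std::vector<int> Entries;
      void push(int V) { Entries.push_back(V); }
      void pop() { Entries.pop_back(); }
    };

    class ScopedEntry {
      Stack &S;
      bool Inserted = false;
    public:
      ScopedEntry(Stack &S, const int *MaybeValue) : S(S) {
        if (MaybeValue) {      // push only when there is something to track
          S.push(*MaybeValue);
          Inserted = true;
        }
      }
      ~ScopedEntry() {
        if (Inserted)          // never pop an entry this guard did not push
          S.pop();
      }
    };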
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index 79c08adb8fab..9678e30699c8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -1562,7 +1562,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
const auto *OpDeleteType =
OpDeleteQualType.getTypePtr()->castAs<FunctionProtoType>();
if (OpDeleteType->getNumParams() > DeleteArgs.size() &&
- S.getASTContext().hasSameType(
+ S.getASTContext().hasSameUnqualifiedType(
OpDeleteType->getParamType(DeleteArgs.size()), FrameSize->getType()))
DeleteArgs.push_back(FrameSize);
@@ -1579,7 +1579,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
// So we are not forced to pass alignment to the deallocation function.
if (S.getLangOpts().CoroAlignedAllocation &&
OpDeleteType->getNumParams() > DeleteArgs.size() &&
- S.getASTContext().hasSameType(
+ S.getASTContext().hasSameUnqualifiedType(
OpDeleteType->getParamType(DeleteArgs.size()),
FrameAlignment->getType()))
DeleteArgs.push_back(FrameAlignment);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index e2b921bfe78f..051fad04219f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -13088,9 +13088,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// C++ [module.import/6] external definitions are not permitted in header
// units.
if (getLangOpts().CPlusPlusModules && currentModuleIsHeaderUnit() &&
- VDecl->isThisDeclarationADefinition() &&
+ !VDecl->isInvalidDecl() && VDecl->isThisDeclarationADefinition() &&
VDecl->getFormalLinkage() == Linkage::ExternalLinkage &&
- !VDecl->isInline()) {
+ !VDecl->isInline() && !VDecl->isTemplated() &&
+ !isa<VarTemplateSpecializationDecl>(VDecl)) {
Diag(VDecl->getLocation(), diag::err_extern_def_in_header_unit);
VDecl->setInvalidDecl();
}
@@ -15259,9 +15260,10 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// FIXME: Consider an alternate location for the test where the inlined()
// state is complete.
if (getLangOpts().CPlusPlusModules && currentModuleIsHeaderUnit() &&
+ !FD->isInvalidDecl() && !FD->isInlined() &&
+ BodyKind != FnBodyKind::Delete && BodyKind != FnBodyKind::Default &&
FD->getFormalLinkage() == Linkage::ExternalLinkage &&
- !FD->isInvalidDecl() && BodyKind != FnBodyKind::Delete &&
- BodyKind != FnBodyKind::Default && !FD->isInlined()) {
+ !FD->isTemplated() && !FD->isTemplateInstantiation()) {
assert(FD->isThisDeclarationADefinition());
Diag(FD->getLocation(), diag::err_extern_def_in_header_unit);
FD->setInvalidDecl();
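Note: the loosened checks above exempt invalid, inline, defaulted/deleted, and templated declarations from the err_extern_def_in_header_unit diagnostic. A hedged source-level illustration (assumes the header is compiled as a C++20 header unit; exact driver flags vary):

    // counters.h, built as a header unit
    int counter = 0;                            // non-inline external definition: diagnosed
    inline int next() { return ++counter; }     // inline: allowed
    template <class T> T zero() { return T{}; } // templated: exempt after this change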
@@ -16616,8 +16618,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
- OffsetOfKind OOK, UsingShadowDecl *&FoundUsingShadow,
- SkipBodyInfo *SkipBody) {
+ OffsetOfKind OOK, SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
assert((Name != nullptr || TUK == TUK_Definition) &&
@@ -17052,7 +17053,6 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
- FoundUsingShadow = Shadow;
if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
@@ -18871,10 +18871,24 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ProcessDeclAttributeList(S, Record, Attrs);
// Check to see if a FieldDecl is a pointer to a function.
- auto IsFunctionPointer = [&](const Decl *D) {
+ auto IsFunctionPointerOrForwardDecl = [&](const Decl *D) {
const FieldDecl *FD = dyn_cast<FieldDecl>(D);
- if (!FD)
+ if (!FD) {
+ // Check whether this is a forward declaration that was inserted by
+      // Clang. This happens when a type that is neither forward-declared nor
+      // defined is used, e.g.:
+ //
+ // struct foo {
+ // struct bar *(*f)();
+ // struct bar *(*g)();
+ // };
+ //
+ // "struct bar" shows up in the decl AST as a "RecordDecl" with an
+ // incomplete definition.
+ if (const auto *TD = dyn_cast<TagDecl>(D))
+ return !TD->isCompleteDefinition();
return false;
+ }
QualType FieldType = FD->getType().getDesugaredType(Context);
if (isa<PointerType>(FieldType)) {
QualType PointeeType = cast<PointerType>(FieldType)->getPointeeType();
@@ -18888,7 +18902,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (!getLangOpts().CPlusPlus &&
(Record->hasAttr<RandomizeLayoutAttr>() ||
(!Record->hasAttr<NoRandomizeLayoutAttr>() &&
- llvm::all_of(Record->decls(), IsFunctionPointer))) &&
+ llvm::all_of(Record->decls(), IsFunctionPointerOrForwardDecl))) &&
!Record->isUnion() && !getLangOpts().RandstructSeed.empty() &&
!Record->isRandomized()) {
SmallVector<Decl *, 32> NewDeclOrdering;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index 348092fc62e8..df83442a8cd1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -16977,7 +16977,6 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (SS.isEmpty()) {
bool Owned = false;
bool IsDependent = false;
- UsingShadowDecl* FoundUsing = nullptr;
return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr,
AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
@@ -16986,7 +16985,7 @@ DeclResult Sema::ActOnTemplatedFriendTag(
/*ScopedEnumUsesClassTag=*/false,
/*UnderlyingType=*/TypeResult(),
/*IsTypeSpecifier=*/false,
- /*IsTemplateParamOrArg=*/false, /*OOK=*/OOK_Outside, FoundUsing);
+ /*IsTemplateParamOrArg=*/false, /*OOK=*/OOK_Outside);
}
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index e3eef9323b2f..abf5a72e7308 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -1483,13 +1483,14 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
// Otherwise, if the type contains a placeholder type, it is replaced by the
// type determined by placeholder type deduction.
DeducedType *Deduced = Ty->getContainedDeducedType();
- if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
+ if (Deduced && !Deduced->isDeduced() &&
+ isa<DeducedTemplateSpecializationType>(Deduced)) {
Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
Kind, Exprs);
if (Ty.isNull())
return ExprError();
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
- } else if (Deduced) {
+ } else if (Deduced && !Deduced->isDeduced()) {
MultiExprArg Inits = Exprs;
if (ListInitialization) {
auto *ILE = cast<InitListExpr>(Exprs[0]);
@@ -2016,7 +2017,8 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
auto *Deduced = AllocType->getContainedDeducedType();
- if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
+ if (Deduced && !Deduced->isDeduced() &&
+ isa<DeducedTemplateSpecializationType>(Deduced)) {
if (ArraySize)
return ExprError(
Diag(*ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
@@ -2030,7 +2032,7 @@ ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
AllocTypeInfo, Entity, Kind, Exprs);
if (AllocType.isNull())
return ExprError();
- } else if (Deduced) {
+ } else if (Deduced && !Deduced->isDeduced()) {
MultiExprArg Inits = Exprs;
bool Braced = (initStyle == CXXNewExpr::ListInit);
if (Braced) {
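Note: both guarded paths above now skip deduction when the contained placeholder type has already been deduced. The constructs they cover are ordinary C++17 placeholder forms, for example (illustrative only):

    #include <utility>

    void placeholders() {
      auto *p = new std::pair{1, 2.5};   // class template argument deduction in a new-expression
      auto  q = std::pair(3, 4.5);       // CTAD in a functional-style cast
      auto *r = new auto(42);            // plain 'auto' placeholder in a new-expression
      delete p;
      delete r;
      (void)q;
    }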
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index f52c0247f01c..194239ab0e10 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -337,20 +337,29 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
}
case ModuleDeclKind::Implementation: {
- std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
- PP.getIdentifierInfo(ModuleName), Path[0].second);
// C++20 A module-declaration that contains neither an export-
// keyword nor a module-partition implicitly imports the primary
// module interface unit of the module as if by a module-import-
// declaration.
+ std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
+ PP.getIdentifierInfo(ModuleName), Path[0].second);
+
+ // The module loader will assume we're trying to import the module that
+  // we're building if `LangOpts.CurrentModule` equals 'ModuleName'.
+ // Change the value for `LangOpts.CurrentModule` temporarily to make the
+ // module loader work properly.
+ const_cast<LangOptions&>(getLangOpts()).CurrentModule = "";
Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
Module::AllVisible,
/*IsInclusionDirective=*/false);
+ const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
+
if (!Mod) {
Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
// Create an empty module interface unit for error recovery.
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
}
+
} break;
case ModuleDeclKind::PartitionImplementation:
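Note: the Implementation case above temporarily clears LangOpts.CurrentModule around the loadModule call so the loader does not conclude the module being built is importing itself. The same save/restore shape expressed as a small RAII helper (a sketch, not the code Clang uses):

    #include <string>
    #include <utility>

    template <class T>
    class SaveAndRestoreValue {
      T &Ref;
      T Saved;
    public:
      SaveAndRestoreValue(T &Ref, T NewValue)
          : Ref(Ref), Saved(std::move(Ref)) { Ref = std::move(NewValue); }
      ~SaveAndRestoreValue() { Ref = std::move(Saved); }
    };

    void loadPrimaryInterface(std::string &CurrentModule) {
      SaveAndRestoreValue<std::string> Guard(CurrentModule, "");
      // ... call the module loader here ...
    }                                        // CurrentModule restored on scope exit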
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index 7716dfb15458..fedc314f2965 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -192,7 +192,7 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
PolicyScheme MaskedPolicyScheme =
static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
- const Policy DefaultPolicy(Record.HasTailPolicy, Record.HasMaskPolicy);
+ const Policy DefaultPolicy;
llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
@@ -208,8 +208,7 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
SmallVector<Policy> SupportedUnMaskedPolicies =
- RVVIntrinsic::getSupportedUnMaskedPolicies(Record.HasTailPolicy,
- Record.HasMaskPolicy);
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
SmallVector<Policy> SupportedMaskedPolicies =
RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
Record.HasMaskPolicy);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 4b144c239fa4..890cea1dfb0e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -2540,8 +2540,6 @@ private:
TInfo->getType(), TInfo, LocEnd, Ctor);
Guide->setImplicit();
Guide->setParams(Params);
- if (Ctor && Ctor->getTrailingRequiresClause())
- Guide->setTrailingRequiresClause(Ctor->getTrailingRequiresClause());
for (auto *Param : Params)
Param->setDeclContext(Guide);
@@ -10181,14 +10179,11 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
bool Owned = false;
bool IsDependent = false;
- UsingShadowDecl* FoundUsing = nullptr;
- Decl *TagD =
- ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name, NameLoc, Attr,
- AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name,
+ NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(),
false, TypeResult(), /*IsTypeSpecifier*/ false,
- /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside, FoundUsing)
- .get();
+ /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside).get();
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 9e48a2a35a34..1fe2d3fac685 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -756,11 +756,8 @@ private:
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
- UnexpandedParameterPack U = Unexpanded[I];
- if (U.first.is<const SubstTemplateTypeParmPackType *>() ||
- U.first.is<const SubstNonTypeTemplateParmPackExpr *>())
- continue;
- auto [Depth, Index] = getDepthAndIndex(U);
+ unsigned Depth, Index;
+ std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
if (Depth == Info.getDeducedDepth())
AddPack(Index);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index 01a435668d88..86268b504cbb 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -89,23 +89,6 @@ namespace {
return true;
}
- bool
- VisitSubstTemplateTypeParmPackTypeLoc(SubstTemplateTypeParmPackTypeLoc TL) {
- Unexpanded.push_back({TL.getTypePtr(), TL.getNameLoc()});
- return true;
- }
-
- bool VisitSubstTemplateTypeParmPackType(SubstTemplateTypeParmPackType *T) {
- Unexpanded.push_back({T, SourceLocation()});
- return true;
- }
-
- bool
- VisitSubstNonTypeTemplateParmPackExpr(SubstNonTypeTemplateParmPackExpr *E) {
- Unexpanded.push_back({E, E->getParameterPackLocation()});
- return true;
- }
-
/// Record occurrences of function and non-type template
/// parameter packs in an expression.
bool VisitDeclRefExpr(DeclRefExpr *E) {
@@ -324,8 +307,7 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
auto *TTPD = dyn_cast<TemplateTypeParmDecl>(LocalPack);
return TTPD && TTPD->getTypeForDecl() == TTPT;
}
- return declaresSameEntity(Pack.first.get<const NamedDecl *>(),
- LocalPack);
+ return declaresSameEntity(Pack.first.get<NamedDecl *>(), LocalPack);
};
if (llvm::any_of(LSI->LocalPacks, DeclaresThisPack))
LambdaParamPackReferences.push_back(Pack);
@@ -377,7 +359,7 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
= Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>())
Name = TTP->getIdentifier();
else
- Name = Unexpanded[I].first.get<const NamedDecl *>()->getIdentifier();
+ Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();
if (Name && NamesKnown.insert(Name).second)
Names.push_back(Name);
@@ -440,7 +422,7 @@ bool Sema::DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE) {
llvm::SmallPtrSet<NamedDecl*, 8> ParmSet(Parms.begin(), Parms.end());
SmallVector<UnexpandedParameterPack, 2> UnexpandedParms;
for (auto Parm : Unexpanded)
- if (ParmSet.contains(Parm.first.dyn_cast<const NamedDecl *>()))
+ if (ParmSet.contains(Parm.first.dyn_cast<NamedDecl *>()))
UnexpandedParms.push_back(Parm);
if (UnexpandedParms.empty())
return false;
@@ -692,95 +674,109 @@ bool Sema::CheckParameterPacksForExpansion(
bool &RetainExpansion, std::optional<unsigned> &NumExpansions) {
ShouldExpand = true;
RetainExpansion = false;
- std::pair<const IdentifierInfo *, SourceLocation> FirstPack;
- std::optional<std::pair<unsigned, SourceLocation>> PartialExpansion;
- std::optional<unsigned> CurNumExpansions;
+ std::pair<IdentifierInfo *, SourceLocation> FirstPack;
+ bool HaveFirstPack = false;
+ std::optional<unsigned> NumPartialExpansions;
+ SourceLocation PartiallySubstitutedPackLoc;
- for (auto [P, Loc] : Unexpanded) {
+ for (UnexpandedParameterPack ParmPack : Unexpanded) {
// Compute the depth and index for this parameter pack.
- std::optional<std::pair<unsigned, unsigned>> Pos;
+ unsigned Depth = 0, Index = 0;
+ IdentifierInfo *Name;
+ bool IsVarDeclPack = false;
+
+ if (const TemplateTypeParmType *TTP =
+ ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ Name = TTP->getIdentifier();
+ } else {
+ NamedDecl *ND = ParmPack.first.get<NamedDecl *>();
+ if (isa<VarDecl>(ND))
+ IsVarDeclPack = true;
+ else
+ std::tie(Depth, Index) = getDepthAndIndex(ND);
+
+ Name = ND->getIdentifier();
+ }
+
+ // Determine the size of this argument pack.
unsigned NewPackSize;
- const auto *ND = P.dyn_cast<const NamedDecl *>();
- if (ND && isa<VarDecl>(ND)) {
- const auto *DAP =
- CurrentInstantiationScope->findInstantiationOf(ND)
- ->dyn_cast<LocalInstantiationScope::DeclArgumentPack *>();
- if (!DAP) {
+ if (IsVarDeclPack) {
+ // Figure out whether we're instantiating to an argument pack or not.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation =
+ CurrentInstantiationScope->findInstantiationOf(
+ ParmPack.first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>()) {
+ // We could expand this function parameter pack.
+ NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
+ } else {
// We can't expand this function parameter pack, so we can't expand
// the pack expansion.
ShouldExpand = false;
continue;
}
- NewPackSize = DAP->size();
- } else if (ND) {
- Pos = getDepthAndIndex(ND);
- } else if (const auto *TTP = P.dyn_cast<const TemplateTypeParmType *>()) {
- Pos = {TTP->getDepth(), TTP->getIndex()};
- ND = TTP->getDecl();
- // FIXME: We either should have some fallback for canonical TTP, or
- // never have canonical TTP here.
- } else if (const auto *STP =
- P.dyn_cast<const SubstTemplateTypeParmPackType *>()) {
- NewPackSize = STP->getNumArgs();
- ND = STP->getReplacedParameter();
} else {
- const auto *SEP = P.get<const SubstNonTypeTemplateParmPackExpr *>();
- NewPackSize = SEP->getArgumentPack().pack_size();
- ND = SEP->getParameterPack();
- }
-
- if (Pos) {
// If we don't have a template argument at this depth/index, then we
// cannot expand the pack expansion. Make a note of this, but we still
// want to check any parameter packs we *do* have arguments for.
- if (Pos->first >= TemplateArgs.getNumLevels() ||
- !TemplateArgs.hasTemplateArgument(Pos->first, Pos->second)) {
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index)) {
ShouldExpand = false;
continue;
}
+
// Determine the size of the argument pack.
- NewPackSize = TemplateArgs(Pos->first, Pos->second).pack_size();
- // C++0x [temp.arg.explicit]p9:
- // Template argument deduction can extend the sequence of template
- // arguments corresponding to a template parameter pack, even when the
- // sequence contains explicitly specified template arguments.
- if (CurrentInstantiationScope)
- if (const NamedDecl *PartialPack =
- CurrentInstantiationScope->getPartiallySubstitutedPack();
- PartialPack && getDepthAndIndex(PartialPack) == *Pos) {
+ NewPackSize = TemplateArgs(Depth, Index).pack_size();
+ }
+
+ // C++0x [temp.arg.explicit]p9:
+ // Template argument deduction can extend the sequence of template
+ // arguments corresponding to a template parameter pack, even when the
+ // sequence contains explicitly specified template arguments.
+ if (!IsVarDeclPack && CurrentInstantiationScope) {
+ if (NamedDecl *PartialPack =
+ CurrentInstantiationScope->getPartiallySubstitutedPack()) {
+ unsigned PartialDepth, PartialIndex;
+ std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
+ if (PartialDepth == Depth && PartialIndex == Index) {
RetainExpansion = true;
// We don't actually know the new pack size yet.
- PartialExpansion = {NewPackSize, Loc};
+ NumPartialExpansions = NewPackSize;
+ PartiallySubstitutedPackLoc = ParmPack.second;
continue;
}
+ }
}
- // FIXME: Workaround for Canonical TTP.
- const IdentifierInfo *Name = ND ? ND->getIdentifier() : nullptr;
- if (!CurNumExpansions) {
+ if (!NumExpansions) {
     // This is the first pack we've seen for which we have an argument.
// Record it.
- CurNumExpansions = NewPackSize;
- FirstPack = {Name, Loc};
- } else if (NewPackSize != *CurNumExpansions) {
+ NumExpansions = NewPackSize;
+ FirstPack.first = Name;
+ FirstPack.second = ParmPack.second;
+ HaveFirstPack = true;
+ continue;
+ }
+
+ if (NewPackSize != *NumExpansions) {
// C++0x [temp.variadic]p5:
// All of the parameter packs expanded by a pack expansion shall have
// the same number of arguments specified.
- Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
- << FirstPack.first << Name << *CurNumExpansions << NewPackSize
- << SourceRange(FirstPack.second) << SourceRange(Loc);
+ if (HaveFirstPack)
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(ParmPack.second);
+ else
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(ParmPack.second);
return true;
}
}
- if (NumExpansions && CurNumExpansions &&
- *NumExpansions != *CurNumExpansions) {
- Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
- << FirstPack.first << *CurNumExpansions << *NumExpansions
- << SourceRange(FirstPack.second);
- return true;
- }
-
// If we're performing a partial expansion but we also have a full expansion,
// expand to the number of common arguments. For example, given:
//
@@ -790,18 +786,17 @@ bool Sema::CheckParameterPacksForExpansion(
//
// ... a call to 'A<int, int>().f<int>' should expand the pack once and
// retain an expansion.
- if (PartialExpansion) {
- if (CurNumExpansions && *CurNumExpansions < PartialExpansion->first) {
+ if (NumPartialExpansions) {
+ if (NumExpansions && *NumExpansions < *NumPartialExpansions) {
NamedDecl *PartialPack =
CurrentInstantiationScope->getPartiallySubstitutedPack();
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_partial)
- << PartialPack << PartialExpansion->first << *CurNumExpansions
- << SourceRange(PartialExpansion->second);
+ << PartialPack << *NumPartialExpansions << *NumExpansions
+ << SourceRange(PartiallySubstitutedPackLoc);
return true;
}
- NumExpansions = PartialExpansion->first;
- } else {
- NumExpansions = CurNumExpansions;
+
+ NumExpansions = NumPartialExpansions;
}
return false;
@@ -814,48 +809,47 @@ std::optional<unsigned> Sema::getNumArgumentsInExpansion(
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
std::optional<unsigned> Result;
- auto setResultSz = [&Result](unsigned Size) {
- assert((!Result || *Result == Size) && "inconsistent pack sizes");
- Result = Size;
- };
- auto setResultPos = [&](const std::pair<unsigned, unsigned> &Pos) -> bool {
- unsigned Depth = Pos.first, Index = Pos.second;
- if (Depth >= TemplateArgs.getNumLevels() ||
- !TemplateArgs.hasTemplateArgument(Depth, Index))
- // The pattern refers to an unknown template argument. We're not ready to
- // expand this pack yet.
- return true;
- // Determine the size of the argument pack.
- setResultSz(TemplateArgs(Depth, Index).pack_size());
- return false;
- };
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth;
+ unsigned Index;
- for (auto [I, _] : Unexpanded) {
- if (const auto *TTP = I.dyn_cast<const TemplateTypeParmType *>()) {
- if (setResultPos({TTP->getDepth(), TTP->getIndex()}))
- return std::nullopt;
- } else if (const auto *STP =
- I.dyn_cast<const SubstTemplateTypeParmPackType *>()) {
- setResultSz(STP->getNumArgs());
- } else if (const auto *SEP =
- I.dyn_cast<const SubstNonTypeTemplateParmPackExpr *>()) {
- setResultSz(SEP->getArgumentPack().pack_size());
+ if (const TemplateTypeParmType *TTP =
+ Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
} else {
- const auto *ND = I.get<const NamedDecl *>();
- // Function parameter pack or init-capture pack.
+ NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
if (isa<VarDecl>(ND)) {
- const auto *DAP =
- CurrentInstantiationScope->findInstantiationOf(ND)
- ->dyn_cast<LocalInstantiationScope::DeclArgumentPack *>();
- if (!DAP)
+ // Function parameter pack or init-capture pack.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation =
+ CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<Decl *>())
// The pattern refers to an unexpanded pack. We're not ready to expand
// this pack yet.
return std::nullopt;
- setResultSz(DAP->size());
- } else if (setResultPos(getDepthAndIndex(ND))) {
- return std::nullopt;
+
+ unsigned Size = Instantiation->get<DeclArgumentPack *>()->size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
+ continue;
}
+
+ std::tie(Depth, Index) = getDepthAndIndex(ND);
}
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index))
+ // The pattern refers to an unknown template argument. We're not ready to
+ // expand this pack yet.
+ return std::nullopt;
+
+ // Determine the size of the argument pack.
+ unsigned Size = TemplateArgs(Depth, Index).pack_size();
+ assert((!Result || *Result == Size) && "inconsistent pack sizes");
+ Result = Size;
}
return Result;
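Note: the restored logic above enforces C++ [temp.variadic]p5: every pack expanded by one expansion must have the same length. A self-contained example of the conflict it diagnoses (err_pack_expansion_length_conflict):

    template <class... Ts, class... Us>
    void zip(Us... us) {
      // Expanding Ts and Us together requires sizeof...(Ts) == sizeof...(Us).
      int unused[] = {(static_cast<Ts>(us), 0)...};
      (void)unused;
    }

    void demo() {
      zip<int, long>(1, 2L);   // OK: both packs have two elements
      // zip<int>(1, 2L);      // error: packs of length 1 and 2 in one expansion
    }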
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index 89d819a77dcb..8cb1ed28fe3e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -1588,9 +1588,6 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// TypeQuals handled by caller.
Result = Context.getTypeDeclType(D);
- if (const auto *Using =
- dyn_cast_or_null<UsingShadowDecl>(DS.getRepAsFoundDecl()))
- Result = Context.getUsingType(Using, Result);
// In both C and C++, make an ElaboratedType.
ElaboratedTypeKeyword Keyword
@@ -6256,9 +6253,6 @@ namespace {
void VisitTagTypeLoc(TagTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
}
- void VisitUsingTypeLoc(UsingTypeLoc TL) {
- TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
- }
void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
// An AtomicTypeLoc can come from either an _Atomic(...) type specifier
// or an _Atomic qualifier.
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 6a05ecc5370f..48bb28b56cd3 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -4569,7 +4569,7 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
getSema(),
Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
: Sema::ExpressionEvaluationContext::ConstantEvaluated,
- /*LambdaContextDecl=*/nullptr, /*ExprContext=*/
+ Sema::ReuseLambdaContextDecl, /*ExprContext=*/
Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
Expr *InputExpr = Input.getSourceExpression();
@@ -5897,7 +5897,6 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
= dyn_cast<PackExpansionType>(OldType)) {
// We have a function parameter pack that may need to be expanded.
QualType Pattern = Expansion->getPattern();
- NumExpansions = Expansion->getNumExpansions();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index b35ab1fe23ce..b85d0adb8eaf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -41,6 +41,10 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
if (!RetE)
return;
+ // Skip "body farmed" functions.
+ if (RetE->getSourceRange().isInvalid())
+ return;
+
SVal V = C.getSVal(RetE);
const MemRegion *R = V.getAsRegion();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index a7f149b87e79..c3bd4876faf2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -766,7 +766,7 @@ PathDiagnosticPieceRef PathDiagnosticBuilder::generateDiagForSwitchOP(
case Stmt::CaseStmtClass: {
os << "Control jumps to 'case ";
const auto *Case = cast<CaseStmt>(S);
- const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+ const Expr *LHS = Case->getLHS()->IgnoreParenImpCasts();
// Determine if it is an enum.
bool GetRawInt = true;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 1017dff2b0f3..a275d36286d3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -2678,7 +2678,18 @@ EquivalenceClass::simplify(SValBuilder &SVB, RangeSet::Factory &F,
if (OldState == State)
continue;
- assert(find(State, MemberSym) == find(State, SimplifiedMemberSym));
+ // Be aware that `SimplifiedMemberSym` might refer to an already dead
+ // symbol. In that case, the eqclass of that might not be the same as the
+ // eqclass of `MemberSym`. This is because the dead symbols are not
+ // preserved in the `ClassMap`, hence
+ // `find(State, SimplifiedMemberSym)` will result in a trivial eqclass
+ // compared to the eqclass of `MemberSym`.
+ // These eqclasses should be the same if `SimplifiedMemberSym` is alive.
+ // --> assert(find(State, MemberSym) == find(State, SimplifiedMemberSym))
+ //
+ // Note that `MemberSym` must be alive here since that is from the
+ // `ClassMembers` where all the symbols are alive.
+
// Remove the old and more complex symbol.
State = find(State, MemberSym).removeMember(State, MemberSym);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 46948c12617c..49855305cecc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1849,8 +1849,12 @@ std::optional<SVal> RegionStoreManager::getSValFromInitListExpr(
// Go to the nested initializer list.
ILE = IL;
}
- llvm_unreachable(
- "Unhandled InitListExpr sub-expressions or invalid offsets.");
+
+ assert(ILE);
+
+  // FIXME: Unhandled InitListExpr sub-expression, possibly constructing an
+ // enum?
+ return std::nullopt;
}
/// Returns an SVal, if possible, for the specified position in a string
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 25084dd98e5c..86da7e86f831 100644
--- a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -966,40 +966,26 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
return NewPrototype;
}
-llvm::SmallVector<Policy>
-RVVIntrinsic::getSupportedUnMaskedPolicies(bool HasTailPolicy,
- bool HasMaskPolicy) {
- return {
- Policy(Policy::PolicyType::Undisturbed, HasTailPolicy,
- HasMaskPolicy), // TU
- Policy(Policy::PolicyType::Agnostic, HasTailPolicy, HasMaskPolicy)}; // TA
+llvm::SmallVector<Policy> RVVIntrinsic::getSupportedUnMaskedPolicies() {
+ return {Policy(Policy::PolicyType::Undisturbed)}; // TU
}
llvm::SmallVector<Policy>
RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
bool HasMaskPolicy) {
if (HasTailPolicy && HasMaskPolicy)
- return {
- Policy(Policy::PolicyType::Undisturbed, Policy::PolicyType::Agnostic,
- HasTailPolicy, HasMaskPolicy), // TUMA
- Policy(Policy::PolicyType::Agnostic, Policy::PolicyType::Agnostic,
- HasTailPolicy, HasMaskPolicy), // TAMA
- Policy(Policy::PolicyType::Undisturbed, Policy::PolicyType::Undisturbed,
- HasTailPolicy, HasMaskPolicy), // TUMU
- Policy(Policy::PolicyType::Agnostic, Policy::PolicyType::Undisturbed,
- HasTailPolicy, HasMaskPolicy)}; // TAMU
+ return {Policy(Policy::PolicyType::Undisturbed,
+ Policy::PolicyType::Agnostic), // TUM
+ Policy(Policy::PolicyType::Undisturbed,
+ Policy::PolicyType::Undisturbed), // TUMU
+ Policy(Policy::PolicyType::Agnostic,
+ Policy::PolicyType::Undisturbed)}; // MU
if (HasTailPolicy && !HasMaskPolicy)
return {Policy(Policy::PolicyType::Undisturbed,
- Policy::PolicyType::Agnostic, HasTailPolicy,
- HasMaskPolicy), // TUM
- Policy(Policy::PolicyType::Agnostic, Policy::PolicyType::Agnostic,
- HasTailPolicy, HasMaskPolicy)}; // TAM
+ Policy::PolicyType::Agnostic)}; // TU
if (!HasTailPolicy && HasMaskPolicy)
- return {Policy(Policy::PolicyType::Agnostic, Policy::PolicyType::Agnostic,
- HasTailPolicy, HasMaskPolicy), // MA
- Policy(Policy::PolicyType::Agnostic,
- Policy::PolicyType::Undisturbed, HasTailPolicy,
- HasMaskPolicy)}; // MU
+ return {Policy(Policy::PolicyType::Agnostic,
+ Policy::PolicyType::Undisturbed)}; // MU
llvm_unreachable("An RVV instruction should not be without both tail policy "
"and mask policy");
}
@@ -1016,46 +1002,34 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
OverloadedName += suffix;
};
- if (PolicyAttrs.isUnspecified()) {
- PolicyAttrs.IsUnspecified = false;
- if (IsMasked) {
+ // This follows the naming guideline under riscv-c-api-doc to add the
+  // `__riscv_` prefix for all RVV intrinsics.
+ Name = "__riscv_" + Name;
+ OverloadedName = "__riscv_" + OverloadedName;
+
+ if (IsMasked) {
+ if (PolicyAttrs.isTUMUPolicy())
+ appendPolicySuffix("_tumu");
+ else if (PolicyAttrs.isTUMAPolicy())
+ appendPolicySuffix("_tum");
+ else if (PolicyAttrs.isTAMUPolicy())
+ appendPolicySuffix("_mu");
+ else if (PolicyAttrs.isTAMAPolicy()) {
Name += "_m";
if (HasPolicy)
BuiltinName += "_tama";
else
BuiltinName += "_m";
- } else {
+ } else
+ llvm_unreachable("Unhandled policy condition");
+ } else {
+ if (PolicyAttrs.isTUPolicy())
+ appendPolicySuffix("_tu");
+ else if (PolicyAttrs.isTAPolicy()) {
if (HasPolicy)
BuiltinName += "_ta";
- }
- } else {
- if (IsMasked) {
- if (PolicyAttrs.isTUMAPolicy() && !PolicyAttrs.hasMaskPolicy())
- appendPolicySuffix("_tum");
- else if (PolicyAttrs.isTAMAPolicy() && !PolicyAttrs.hasMaskPolicy())
- appendPolicySuffix("_tam");
- else if (PolicyAttrs.isMUPolicy() && !PolicyAttrs.hasTailPolicy())
- appendPolicySuffix("_mu");
- else if (PolicyAttrs.isMAPolicy() && !PolicyAttrs.hasTailPolicy())
- appendPolicySuffix("_ma");
- else if (PolicyAttrs.isTUMUPolicy())
- appendPolicySuffix("_tumu");
- else if (PolicyAttrs.isTAMUPolicy())
- appendPolicySuffix("_tamu");
- else if (PolicyAttrs.isTUMAPolicy())
- appendPolicySuffix("_tuma");
- else if (PolicyAttrs.isTAMAPolicy())
- appendPolicySuffix("_tama");
- else
- llvm_unreachable("Unhandled policy condition");
- } else {
- if (PolicyAttrs.isTUPolicy())
- appendPolicySuffix("_tu");
- else if (PolicyAttrs.isTAPolicy())
- appendPolicySuffix("_ta");
- else
- llvm_unreachable("Unhandled policy condition");
- }
+ } else
+ llvm_unreachable("Unhandled policy condition");
}
}
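Note: the rewrite above settles the RVV intrinsic naming on a `__riscv_` prefix plus a reduced set of policy suffixes. An illustration of the resulting scheme (suffixes taken from the code above; the base mnemonic __riscv_vadd_vv_i32m1 is an assumed example, and using it requires a RISC-V target with the V extension and <riscv_vector.h>):

    //   __riscv_vadd_vv_i32m1        unmasked, tail-agnostic (no suffix)
    //   __riscv_vadd_vv_i32m1_tu     unmasked, tail-undisturbed
    //   __riscv_vadd_vv_i32m1_m      masked, TAMA form
    //   __riscv_vadd_vv_i32m1_tum    masked, tail-undisturbed
    //   __riscv_vadd_vv_i32m1_tumu   masked, tail-undisturbed, mask-undisturbed
    //   __riscv_vadd_vv_i32m1_mu     masked, mask-undisturbed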
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index 3fcef00a5780..ae1662237e87 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -40,67 +40,69 @@ DependencyScanningTool::DependencyScanningTool(
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
: Worker(Service, std::move(FS)) {}
-llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
- const std::vector<std::string> &CommandLine, StringRef CWD,
- std::optional<StringRef> ModuleName) {
- /// Prints out all of the gathered dependencies into a string.
- class MakeDependencyPrinterConsumer : public DependencyConsumer {
- public:
- void handleBuildCommand(Command) override {}
+namespace {
+/// Prints out all of the gathered dependencies into a string.
+class MakeDependencyPrinterConsumer : public DependencyConsumer {
+public:
+ void handleBuildCommand(Command) override {}
+
+ void
+ handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {
+ this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
+ }
- void
- handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {
- this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
- }
+ void handleFileDependency(StringRef File) override {
+ Dependencies.push_back(std::string(File));
+ }
- void handleFileDependency(StringRef File) override {
- Dependencies.push_back(std::string(File));
- }
+ void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
+ // Same as `handleModuleDependency`.
+ }
- void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
- // Same as `handleModuleDependency`.
- }
+ void handleModuleDependency(ModuleDeps MD) override {
+ // These are ignored for the make format as it can't support the full
+ // set of deps, and handleFileDependency handles enough for implicitly
+ // built modules to work.
+ }
- void handleModuleDependency(ModuleDeps MD) override {
- // These are ignored for the make format as it can't support the full
- // set of deps, and handleFileDependency handles enough for implicitly
- // built modules to work.
- }
+ void handleContextHash(std::string Hash) override {}
- void handleContextHash(std::string Hash) override {}
+ std::string lookupModuleOutput(const ModuleID &ID,
+ ModuleOutputKind Kind) override {
+ llvm::report_fatal_error("unexpected call to lookupModuleOutput");
+ }
- std::string lookupModuleOutput(const ModuleID &ID,
- ModuleOutputKind Kind) override {
- llvm::report_fatal_error("unexpected call to lookupModuleOutput");
- }
+ void printDependencies(std::string &S) {
+ assert(Opts && "Handled dependency output options.");
- void printDependencies(std::string &S) {
- assert(Opts && "Handled dependency output options.");
-
- class DependencyPrinter : public DependencyFileGenerator {
- public:
- DependencyPrinter(DependencyOutputOptions &Opts,
- ArrayRef<std::string> Dependencies)
- : DependencyFileGenerator(Opts) {
- for (const auto &Dep : Dependencies)
- addDependency(Dep);
- }
-
- void printDependencies(std::string &S) {
- llvm::raw_string_ostream OS(S);
- outputDependencyFile(OS);
- }
- };
-
- DependencyPrinter Generator(*Opts, Dependencies);
- Generator.printDependencies(S);
- }
+ class DependencyPrinter : public DependencyFileGenerator {
+ public:
+ DependencyPrinter(DependencyOutputOptions &Opts,
+ ArrayRef<std::string> Dependencies)
+ : DependencyFileGenerator(Opts) {
+ for (const auto &Dep : Dependencies)
+ addDependency(Dep);
+ }
- private:
- std::unique_ptr<DependencyOutputOptions> Opts;
- std::vector<std::string> Dependencies;
- };
+ void printDependencies(std::string &S) {
+ llvm::raw_string_ostream OS(S);
+ outputDependencyFile(OS);
+ }
+ };
+
+ DependencyPrinter Generator(*Opts, Dependencies);
+ Generator.printDependencies(S);
+ }
+
+protected:
+ std::unique_ptr<DependencyOutputOptions> Opts;
+ std::vector<std::string> Dependencies;
+};
+} // anonymous namespace
+llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
+ const std::vector<std::string> &CommandLine, StringRef CWD,
+ std::optional<StringRef> ModuleName) {
MakeDependencyPrinterConsumer Consumer;
auto Result =
Worker.computeDependencies(CWD, CommandLine, Consumer, ModuleName);
@@ -111,6 +113,50 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
return Output;
}
+llvm::Expected<P1689Rule> DependencyScanningTool::getP1689ModuleDependencyFile(
+ const CompileCommand &Command, StringRef CWD,
+ std::string &MakeformatOutput, std::string &MakeformatOutputPath) {
+ class P1689ModuleDependencyPrinterConsumer
+ : public MakeDependencyPrinterConsumer {
+ public:
+ P1689ModuleDependencyPrinterConsumer(P1689Rule &Rule,
+ const CompileCommand &Command)
+ : Filename(Command.Filename), Rule(Rule) {
+ Rule.PrimaryOutput = Command.Output;
+ }
+
+ void handleProvidedAndRequiredStdCXXModules(
+ std::optional<P1689ModuleInfo> Provided,
+ std::vector<P1689ModuleInfo> Requires) override {
+ Rule.Provides = Provided;
+ if (Rule.Provides)
+ Rule.Provides->SourcePath = Filename.str();
+ Rule.Requires = Requires;
+ }
+
+ StringRef getMakeFormatDependencyOutputPath() {
+ if (Opts->OutputFormat != DependencyOutputFormat::Make)
+ return {};
+ return Opts->OutputFile;
+ }
+
+ private:
+ StringRef Filename;
+ P1689Rule &Rule;
+ };
+
+ P1689Rule Rule;
+ P1689ModuleDependencyPrinterConsumer Consumer(Rule, Command);
+ auto Result = Worker.computeDependencies(CWD, Command.CommandLine, Consumer);
+ if (Result)
+ return std::move(Result);
+
+ MakeformatOutputPath = Consumer.getMakeFormatDependencyOutputPath();
+ if (!MakeformatOutputPath.empty())
+ Consumer.printDependencies(MakeformatOutput);
+ return Rule;
+}
+
llvm::Expected<FullDependenciesResult>
DependencyScanningTool::getFullDependencies(
const std::vector<std::string> &CommandLine, StringRef CWD,
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index b54b8de9157e..8eb0328d6322 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -247,10 +247,12 @@ public:
std::make_shared<DependencyConsumerForwarder>(
std::move(Opts), WorkingDirectory, Consumer));
break;
+ case ScanningOutputFormat::P1689:
case ScanningOutputFormat::Full:
MDC = std::make_shared<ModuleDepCollector>(
std::move(Opts), ScanInstance, Consumer, OriginalInvocation,
- OptimizeArgs, EagerLoadModules);
+ OptimizeArgs, EagerLoadModules,
+ Format == ScanningOutputFormat::P1689);
ScanInstance.addDependencyCollector(MDC);
break;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index cb1c66b8d63f..d1cbf79a843e 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -339,6 +339,14 @@ void ModuleDepCollectorPP::InclusionDirective(
void ModuleDepCollectorPP::moduleImport(SourceLocation ImportLoc,
ModuleIdPath Path,
const Module *Imported) {
+ if (MDC.ScanInstance.getPreprocessor().isInImportingCXXNamedModules()) {
+ P1689ModuleInfo RequiredModule;
+ RequiredModule.ModuleName = Path[0].first->getName().str();
+ RequiredModule.Type = P1689ModuleInfo::ModuleType::NamedCXXModule;
+ MDC.RequiredStdCXXModules.push_back(RequiredModule);
+ return;
+ }
+
handleImport(Imported);
}
@@ -361,6 +369,21 @@ void ModuleDepCollectorPP::EndOfMainFile() {
.getFileEntryForID(MainFileID)
->getName());
+ auto &PP = MDC.ScanInstance.getPreprocessor();
+ if (PP.isInNamedModule()) {
+ P1689ModuleInfo ProvidedModule;
+ ProvidedModule.ModuleName = PP.getNamedModuleName();
+ ProvidedModule.Type = P1689ModuleInfo::ModuleType::NamedCXXModule;
+ ProvidedModule.IsStdCXXModuleInterface = PP.isInNamedInterfaceUnit();
+      // Don't record an implementation (non-partition) unit as Provides;
+      // record the module as required instead, since the implementation
+      // unit imports the primary module interface implicitly.
+ if (PP.isInImplementationUnit())
+ MDC.RequiredStdCXXModules.push_back(ProvidedModule);
+ else
+ MDC.ProvidedStdCXXModule = ProvidedModule;
+ }
+
if (!MDC.ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty())
MDC.addFileDep(MDC.ScanInstance.getPreprocessorOpts().ImplicitPCHInclude);
@@ -374,6 +397,10 @@ void ModuleDepCollectorPP::EndOfMainFile() {
MDC.Consumer.handleDependencyOutputOpts(*MDC.Opts);
+ if (MDC.IsStdModuleP1689Format)
+ MDC.Consumer.handleProvidedAndRequiredStdCXXModules(
+ MDC.ProvidedStdCXXModule, MDC.RequiredStdCXXModules);
+
for (auto &&I : MDC.ModularDeps)
MDC.Consumer.handleModuleDependency(*I.second);
@@ -548,10 +575,12 @@ void ModuleDepCollectorPP::addAffectingClangModule(
ModuleDepCollector::ModuleDepCollector(
std::unique_ptr<DependencyOutputOptions> Opts,
CompilerInstance &ScanInstance, DependencyConsumer &C,
- CompilerInvocation OriginalCI, bool OptimizeArgs, bool EagerLoadModules)
+ CompilerInvocation OriginalCI, bool OptimizeArgs, bool EagerLoadModules,
+ bool IsStdModuleP1689Format)
: ScanInstance(ScanInstance), Consumer(C), Opts(std::move(Opts)),
OriginalInvocation(std::move(OriginalCI)), OptimizeArgs(OptimizeArgs),
- EagerLoadModules(EagerLoadModules) {}
+ EagerLoadModules(EagerLoadModules),
+ IsStdModuleP1689Format(IsStdModuleP1689Format) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(*this));
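Note: the collector changes above record, for the P1689 output, which named C++20 module a translation unit provides and which ones it requires, treating an implementation unit's own module as a requirement. Standard module-unit syntax this distinguishes (not taken from the patch):

    // a.cppm -- interface unit: provides module 'a'
    export module a;
    export int f();

    // b.cppm -- interface unit: provides 'b', requires 'a'
    export module b;
    import a;                 // recorded as a required named module
    export int g();

    // a_impl.cpp -- implementation unit: recorded as requiring 'a', not providing it
    module a;
    int f() { return 42; }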
diff --git a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
index 668e1072c065..6926bbdf8d0f 100644
--- a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -521,10 +521,9 @@ void RVVEmitter::createRVVIntrinsics(
StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
unsigned NF = R->getValueAsInt("NF");
- const Policy DefaultPolicy(HasTailPolicy, HasMaskPolicy);
+ const Policy DefaultPolicy;
SmallVector<Policy> SupportedUnMaskedPolicies =
- RVVIntrinsic::getSupportedUnMaskedPolicies(HasTailPolicy,
- HasMaskPolicy);
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
SmallVector<Policy> SupportedMaskedPolicies =
RVVIntrinsic::getSupportedMaskedPolicies(HasTailPolicy, HasMaskPolicy);
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
index c5d7ae31afce..f5ad530c7e88 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
@@ -924,6 +924,9 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
#define HWCAP_SB (1 << 29)
#endif
+#ifndef AT_HWCAP2
+#define AT_HWCAP2 26
+#endif
#ifndef HWCAP2_DCPODP
#define HWCAP2_DCPODP (1 << 0)
#endif
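Note: AT_HWCAP2 is the Linux auxiliary-vector tag for the second hardware-capability word; the fallback definition above covers libc headers that predate it. Typical consumption looks like this (assumes <sys/auxv.h>; the HWCAP2_* bit meanings are architecture specific):

    #include <sys/auxv.h>
    #include <cstdio>

    int main() {
      unsigned long hwcap2 = getauxval(AT_HWCAP2);   // returns 0 if the tag is absent
      std::printf("AT_HWCAP2 = %#lx\n", hwcap2);
      return 0;
    }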
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 9687f9030294..ff62fc4af430 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -585,6 +585,7 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
+#else
#error "Unknown OS"
#endif
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index c647ab107ec5..ac2afe42e269 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -231,8 +231,6 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Check that tool command lines are simple and that complete escaping is
// unnecessary.
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
- CHECK(!internal_strstr(arg, "\\\\") &&
- "double backslashes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
command_line.append("\"%s\" ", arg);
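Dropping the double-backslash CHECK is a relaxation: escaped Windows paths and UNC names legitimately contain "\\", while the remaining checks still enforce the simple quoting scheme (no embedded quotes, no trailing backslash) that makes wrapping an argument in bare quotes safe. A sketch of that invariant, under those stated assumptions:

// An argument is safe to wrap as "arg" only if it has no '"' and does not end in '\'.
#include <cstring>

static bool simple_quotable(const char* arg) {
  std::size_t n = std::strlen(arg);
  return n > 0 && !std::strchr(arg, '"') && arg[n - 1] != '\\';
}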
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/ranges_binary_search.h b/contrib/llvm-project/libcxx/include/__algorithm/ranges_binary_search.h
index b2a8977652fb..d72d4e057401 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/ranges_binary_search.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/ranges_binary_search.h
@@ -36,7 +36,7 @@ struct __fn {
_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr
bool operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const {
auto __ret = std::__lower_bound_impl<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj);
- return __ret != __last && !std::invoke(__comp, __value, std::invoke(__proj, *__first));
+ return __ret != __last && !std::invoke(__comp, __value, std::invoke(__proj, *__ret));
}
template <forward_range _Range, class _Type, class _Proj = identity,
@@ -46,7 +46,7 @@ struct __fn {
auto __first = ranges::begin(__r);
auto __last = ranges::end(__r);
auto __ret = std::__lower_bound_impl<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj);
- return __ret != __last && !std::invoke(__comp, __value, std::invoke(__proj, *__first));
+ return __ret != __last && !std::invoke(__comp, __value, std::invoke(__proj, *__ret));
}
};
} // namespace __binary_search
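The fix compares the probe value against the element that __lower_bound_impl returned (*__ret) rather than against the first element of the range (*__first); with the old code, any probe for which lower_bound returned a non-end iterator and which was not less than the first element was reported as present. A small illustration of the intended semantics:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> v{1, 3, 5, 7};
  assert(std::ranges::binary_search(v, 5));   // lower_bound -> 5, !(5 < 5): found
  assert(!std::ranges::binary_search(v, 2));  // lower_bound -> 3,  (2 < 3): not found
  // The pre-fix code effectively tested !(2 < v.front()), i.e. !(2 < 1), wrongly reporting 2 as present.
}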
diff --git a/contrib/llvm-project/libcxx/include/__algorithm/sort.h b/contrib/llvm-project/libcxx/include/__algorithm/sort.h
index a7d2d55a06f8..a236be0a4daf 100644
--- a/contrib/llvm-project/libcxx/include/__algorithm/sort.h
+++ b/contrib/llvm-project/libcxx/include/__algorithm/sort.h
@@ -11,15 +11,10 @@
#include <__algorithm/comp.h>
#include <__algorithm/comp_ref_type.h>
-#include <__algorithm/iter_swap.h>
#include <__algorithm/iterator_operations.h>
#include <__algorithm/min_element.h>
#include <__algorithm/partial_sort.h>
#include <__algorithm/unwrap_iter.h>
-#include <__assert>
-#include <__bit/blsr.h>
-#include <__bit/countl.h>
-#include <__bit/countr.h>
#include <__config>
#include <__debug>
#include <__debug_utils/randomize_range.h>
@@ -28,10 +23,11 @@
#include <__iterator/iterator_traits.h>
#include <__memory/destruct_n.h>
#include <__memory/unique_ptr.h>
-#include <__type_traits/conditional.h>
#include <__type_traits/is_arithmetic.h>
+#include <__type_traits/is_trivially_copy_assignable.h>
+#include <__type_traits/is_trivially_copy_constructible.h>
#include <__utility/move.h>
-#include <__utility/pair.h>
+#include <bit>
#include <climits>
#include <cstdint>
@@ -132,7 +128,8 @@ template <class _AlgPolicy, class _Compare, class _ForwardIterator>
_LIBCPP_HIDE_FROM_ABI
unsigned __sort4(_ForwardIterator __x1, _ForwardIterator __x2, _ForwardIterator __x3, _ForwardIterator __x4,
_Compare __c) {
- using _Ops = _IterOps<_AlgPolicy>;
+ using _Ops = _IterOps<_AlgPolicy>;
+
unsigned __r = std::__sort3<_AlgPolicy, _Compare>(__x1, __x2, __x3, __c);
if (__c(*__x4, *__x3)) {
_Ops::iter_swap(__x3, __x4);
@@ -187,7 +184,7 @@ _LIBCPP_HIDE_FROM_ABI unsigned __sort5_wrap_policy(
_Compare __c) {
using _WrappedComp = typename _WrapAlgPolicy<_AlgPolicy, _Compare>::type;
_WrappedComp __wrapped_comp(__c);
- return std::__sort5<_WrappedComp, _ForwardIterator>(
+ return std::__sort5<_WrappedComp>(
std::move(__x1), std::move(__x2), std::move(__x3), std::move(__x4), std::move(__x5), __wrapped_comp);
}
@@ -212,13 +209,6 @@ using __use_branchless_sort =
integral_constant<bool, __is_cpp17_contiguous_iterator<_Iter>::value && sizeof(_Tp) <= sizeof(void*) &&
is_arithmetic<_Tp>::value && __is_simple_comparator<_Compare>::value>;
-namespace __detail {
-
-// Size in bits for the bitset in use.
-enum { __block_size = sizeof(uint64_t) * 8 };
-
-} // namespace __detail
-
// Ensures that __c(*__x, *__y) is true by swapping *__x and *__y if necessary.
template <class _Compare, class _RandomAccessIterator>
inline _LIBCPP_HIDE_FROM_ABI void __cond_swap(_RandomAccessIterator __x, _RandomAccessIterator __y, _Compare __c) {
@@ -278,15 +268,10 @@ __sort4_maybe_branchless(_RandomAccessIterator __x1, _RandomAccessIterator __x2,
std::__sort4<_AlgPolicy, _Compare>(__x1, __x2, __x3, __x4, __c);
}
-template <class _AlgPolicy, class _Compare, class _RandomAccessIterator>
+template <class, class _Compare, class _RandomAccessIterator>
inline _LIBCPP_HIDE_FROM_ABI __enable_if_t<__use_branchless_sort<_Compare, _RandomAccessIterator>::value, void>
-__sort5_maybe_branchless(
- _RandomAccessIterator __x1,
- _RandomAccessIterator __x2,
- _RandomAccessIterator __x3,
- _RandomAccessIterator __x4,
- _RandomAccessIterator __x5,
- _Compare __c) {
+__sort5_maybe_branchless(_RandomAccessIterator __x1, _RandomAccessIterator __x2, _RandomAccessIterator __x3,
+ _RandomAccessIterator __x4, _RandomAccessIterator __x5, _Compare __c) {
std::__cond_swap<_Compare>(__x1, __x2, __c);
std::__cond_swap<_Compare>(__x4, __x5, __c);
std::__partially_sorted_swap<_Compare>(__x3, __x4, __x5, __c);
@@ -315,48 +300,34 @@ _LIBCPP_CONSTEXPR_SINCE_CXX14 void __selection_sort(_BidirectionalIterator __fir
}
}
-// Sort the iterator range [__first, __last) using the comparator __comp using
-// the insertion sort algorithm.
template <class _AlgPolicy, class _Compare, class _BidirectionalIterator>
_LIBCPP_HIDE_FROM_ABI
void __insertion_sort(_BidirectionalIterator __first, _BidirectionalIterator __last, _Compare __comp) {
using _Ops = _IterOps<_AlgPolicy>;
typedef typename iterator_traits<_BidirectionalIterator>::value_type value_type;
- if (__first == __last)
- return;
- _BidirectionalIterator __i = __first;
- for (++__i; __i != __last; ++__i) {
- _BidirectionalIterator __j = __i;
- --__j;
- if (__comp(*__i, *__j)) {
- value_type __t(_Ops::__iter_move(__i));
- _BidirectionalIterator __k = __j;
- __j = __i;
- do {
+ if (__first != __last) {
+ _BidirectionalIterator __i = __first;
+ for (++__i; __i != __last; ++__i) {
+ _BidirectionalIterator __j = __i;
+ value_type __t(_Ops::__iter_move(__j));
+ for (_BidirectionalIterator __k = __i; __k != __first && __comp(__t, *--__k); --__j)
*__j = _Ops::__iter_move(__k);
- __j = __k;
- } while (__j != __first && __comp(__t, *--__k));
*__j = std::move(__t);
}
}
}
-// Sort the iterator range [__first, __last) using the comparator __comp using
-// the insertion sort algorithm. Insertion sort has two loops, outer and inner.
-// The implementation below has not bounds check (unguarded) for the inner loop.
-// Assumes that there is an element in the position (__first - 1) and that each
-// element in the input range is greater or equal to the element at __first - 1.
template <class _AlgPolicy, class _Compare, class _RandomAccessIterator>
-_LIBCPP_HIDE_FROM_ABI void
-__insertion_sort_unguarded(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) {
+_LIBCPP_HIDE_FROM_ABI
+void __insertion_sort_3(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) {
using _Ops = _IterOps<_AlgPolicy>;
+
typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type;
- if (__first == __last)
- return;
- for (_RandomAccessIterator __i = __first + difference_type(1); __i != __last; ++__i) {
- _RandomAccessIterator __j = __i - difference_type(1);
+ _RandomAccessIterator __j = __first + difference_type(2);
+ std::__sort3_maybe_branchless<_AlgPolicy, _Compare>(__first, __first + difference_type(1), __j, __comp);
+ for (_RandomAccessIterator __i = __j + difference_type(1); __i != __last; ++__i) {
if (__comp(*__i, *__j)) {
value_type __t(_Ops::__iter_move(__i));
_RandomAccessIterator __k = __j;
@@ -364,9 +335,10 @@ __insertion_sort_unguarded(_RandomAccessIterator __first, _RandomAccessIterator
do {
*__j = _Ops::__iter_move(__k);
__j = __k;
- } while (__comp(__t, *--__k)); // No need for bounds check due to the assumption stated above.
+ } while (__j != __first && __comp(__t, *--__k));
*__j = std::move(__t);
}
+ __j = __i;
}
}
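The restored __insertion_sort keeps its inner loop guarded: shifting stops either at __first or as soon as the element to the left no longer compares greater. A plain sketch of that guarded variant (illustrative, without the libc++ iterator-operations machinery):

#include <cstddef>
#include <utility>
#include <vector>

template <class T, class Compare>
void insertion_sort(std::vector<T>& v, Compare comp) {
  for (std::size_t i = 1; i < v.size(); ++i) {
    T t = std::move(v[i]);
    std::size_t j = i;
    while (j > 0 && comp(t, v[j - 1])) {  // bounds check: never reads before v[0]
      v[j] = std::move(v[j - 1]);
      --j;
    }
    v[j] = std::move(t);
  }
}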
@@ -387,7 +359,7 @@ _LIBCPP_HIDDEN bool __insertion_sort_incomplete(
return true;
case 2:
if (__comp(*--__last, *__first))
- _Ops::iter_swap(__first, __last);
+ _IterOps<_AlgPolicy>::iter_swap(__first, __last);
return true;
case 3:
std::__sort3_maybe_branchless<_AlgPolicy, _Compare>(__first, __first + difference_type(1), --__last, __comp);
@@ -456,336 +428,17 @@ void __insertion_sort_move(_BidirectionalIterator __first1, _BidirectionalIterat
}
}
-template <class _AlgPolicy, class _RandomAccessIterator>
-inline _LIBCPP_HIDE_FROM_ABI void __swap_bitmap_pos(
- _RandomAccessIterator __first, _RandomAccessIterator __last, uint64_t& __left_bitset, uint64_t& __right_bitset) {
- using _Ops = _IterOps<_AlgPolicy>;
- typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- // Swap one pair on each iteration as long as both bitsets have at least one
- // element for swapping.
- while (__left_bitset != 0 && __right_bitset != 0) {
- difference_type tz_left = __libcpp_ctz(__left_bitset);
- __left_bitset = __libcpp_blsr(__left_bitset);
- difference_type tz_right = __libcpp_ctz(__right_bitset);
- __right_bitset = __libcpp_blsr(__right_bitset);
- _Ops::iter_swap(__first + tz_left, __last - tz_right);
- }
-}
-
-template <class _Compare,
- class _RandomAccessIterator,
- class _ValueType = typename iterator_traits<_RandomAccessIterator>::value_type>
-inline _LIBCPP_HIDE_FROM_ABI void
-__populate_left_bitset(_RandomAccessIterator __first, _Compare __comp, _ValueType& __pivot, uint64_t& __left_bitset) {
- // Possible vectorization. With a proper "-march" flag, the following loop
- // will be compiled into a set of SIMD instructions.
- _RandomAccessIterator __iter = __first;
- for (int __j = 0; __j < __detail::__block_size;) {
- bool __comp_result = !__comp(*__iter, __pivot);
- __left_bitset |= (static_cast<uint64_t>(__comp_result) << __j);
- __j++;
- ++__iter;
- }
-}
-
-template <class _Compare,
- class _RandomAccessIterator,
- class _ValueType = typename iterator_traits<_RandomAccessIterator>::value_type>
-inline _LIBCPP_HIDE_FROM_ABI void
-__populate_right_bitset(_RandomAccessIterator __lm1, _Compare __comp, _ValueType& __pivot, uint64_t& __right_bitset) {
- // Possible vectorization. With a proper "-march" flag, the following loop
- // will be compiled into a set of SIMD instructions.
- _RandomAccessIterator __iter = __lm1;
- for (int __j = 0; __j < __detail::__block_size;) {
- bool __comp_result = __comp(*__iter, __pivot);
- __right_bitset |= (static_cast<uint64_t>(__comp_result) << __j);
- __j++;
- --__iter;
- }
-}
-
-template <class _AlgPolicy,
- class _Compare,
- class _RandomAccessIterator,
- class _ValueType = typename iterator_traits<_RandomAccessIterator>::value_type>
-inline _LIBCPP_HIDE_FROM_ABI void __bitset_partition_partial_blocks(
- _RandomAccessIterator& __first,
- _RandomAccessIterator& __lm1,
- _Compare __comp,
- _ValueType& __pivot,
- uint64_t& __left_bitset,
- uint64_t& __right_bitset) {
- typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- difference_type __remaining_len = __lm1 - __first + 1;
- difference_type __l_size;
- difference_type __r_size;
- if (__left_bitset == 0 && __right_bitset == 0) {
- __l_size = __remaining_len / 2;
- __r_size = __remaining_len - __l_size;
- } else if (__left_bitset == 0) {
- // We know at least one side is a full block.
- __l_size = __remaining_len - __detail::__block_size;
- __r_size = __detail::__block_size;
- } else { // if (__right_bitset == 0)
- __l_size = __detail::__block_size;
- __r_size = __remaining_len - __detail::__block_size;
- }
- // Record the comparison outcomes for the elements currently on the left side.
- if (__left_bitset == 0) {
- _RandomAccessIterator __iter = __first;
- for (int j = 0; j < __l_size; j++) {
- bool __comp_result = !__comp(*__iter, __pivot);
- __left_bitset |= (static_cast<uint64_t>(__comp_result) << j);
- ++__iter;
- }
- }
- // Record the comparison outcomes for the elements currently on the right
- // side.
- if (__right_bitset == 0) {
- _RandomAccessIterator __iter = __lm1;
- for (int j = 0; j < __r_size; j++) {
- bool __comp_result = __comp(*__iter, __pivot);
- __right_bitset |= (static_cast<uint64_t>(__comp_result) << j);
- --__iter;
- }
- }
- std::__swap_bitmap_pos<_AlgPolicy, _RandomAccessIterator>(__first, __lm1, __left_bitset, __right_bitset);
- __first += (__left_bitset == 0) ? __l_size : 0;
- __lm1 -= (__right_bitset == 0) ? __r_size : 0;
-}
-
-template <class _AlgPolicy, class _RandomAccessIterator>
-inline _LIBCPP_HIDE_FROM_ABI void __swap_bitmap_pos_within(
- _RandomAccessIterator& __first, _RandomAccessIterator& __lm1, uint64_t& __left_bitset, uint64_t& __right_bitset) {
- using _Ops = _IterOps<_AlgPolicy>;
- typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- if (__left_bitset) {
- // Swap within the left side. Need to find set positions in the reverse
- // order.
- while (__left_bitset != 0) {
- difference_type __tz_left = __detail::__block_size - 1 - __libcpp_clz(__left_bitset);
- __left_bitset &= (static_cast<uint64_t>(1) << __tz_left) - 1;
- _RandomAccessIterator it = __first + __tz_left;
- if (it != __lm1) {
- _Ops::iter_swap(it, __lm1);
- }
- --__lm1;
- }
- __first = __lm1 + difference_type(1);
- } else if (__right_bitset) {
- // Swap within the right side. Need to find set positions in the reverse
- // order.
- while (__right_bitset != 0) {
- difference_type __tz_right = __detail::__block_size - 1 - __libcpp_clz(__right_bitset);
- __right_bitset &= (static_cast<uint64_t>(1) << __tz_right) - 1;
- _RandomAccessIterator it = __lm1 - __tz_right;
- if (it != __first) {
- _Ops::iter_swap(it, __first);
- }
- ++__first;
- }
- }
-}
-
-// Partition [__first, __last) using the comparator __comp. *__first has the
-// chosen pivot. Elements that are equivalent are kept to the left of the
-// pivot. Returns the iterator for the pivot and a bool value which is true if
-// the provided range is already sorted, false otherwise. We assume that the
-// length of the range is at least three elements.
-//
-// __bitset_partition uses bitsets for storing outcomes of the comparisons
-// between the pivot and other elements.
-template <class _AlgPolicy, class _RandomAccessIterator, class _Compare>
-_LIBCPP_HIDE_FROM_ABI std::pair<_RandomAccessIterator, bool>
-__bitset_partition(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) {
- using _Ops = _IterOps<_AlgPolicy>;
- typedef typename std::iterator_traits<_RandomAccessIterator>::value_type value_type;
- typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- _LIBCPP_ASSERT(__last - __first >= difference_type(3), "");
-
- _RandomAccessIterator __begin = __first;
- value_type __pivot(_Ops::__iter_move(__first));
- // Find the first element greater than the pivot.
- if (__comp(__pivot, *(__last - difference_type(1)))) {
- // Not guarded since we know the last element is greater than the pivot.
- while (!__comp(__pivot, *++__first)) {
- }
- } else {
- while (++__first < __last && !__comp(__pivot, *__first)) {
- }
- }
- // Find the last element less than or equal to the pivot.
- if (__first < __last) {
- // It will be always guarded because __introsort will do the median-of-three
- // before calling this.
- while (__comp(__pivot, *--__last)) {
- }
- }
- // If the first element greater than the pivot is at or after the
- // last element less than or equal to the pivot, then we have covered the
- // entire range without swapping elements. This implies the range is already
- // partitioned.
- bool __already_partitioned = __first >= __last;
- if (!__already_partitioned) {
- _Ops::iter_swap(__first, __last);
- ++__first;
- }
-
- // In [__first, __last) __last is not inclusive. From now on, it uses last
- // minus one to be inclusive on both sides.
- _RandomAccessIterator __lm1 = __last - difference_type(1);
- uint64_t __left_bitset = 0;
- uint64_t __right_bitset = 0;
-
- // Reminder: length = __lm1 - __first + 1.
- while (__lm1 - __first >= 2 * __detail::__block_size - 1) {
- // Record the comparison outcomes for the elements currently on the left
- // side.
- if (__left_bitset == 0)
- std::__populate_left_bitset<_Compare>(__first, __comp, __pivot, __left_bitset);
- // Record the comparison outcomes for the elements currently on the right
- // side.
- if (__right_bitset == 0)
- std::__populate_right_bitset<_Compare>(__lm1, __comp, __pivot, __right_bitset);
- // Swap the elements recorded to be the candidates for swapping in the
- // bitsets.
- std::__swap_bitmap_pos<_AlgPolicy, _RandomAccessIterator>(__first, __lm1, __left_bitset, __right_bitset);
- // Only advance the iterator if all the elements that need to be moved to
- // other side were moved.
- __first += (__left_bitset == 0) ? difference_type(__detail::__block_size) : difference_type(0);
- __lm1 -= (__right_bitset == 0) ? difference_type(__detail::__block_size) : difference_type(0);
- }
- // Now, we have a less-than a block worth of elements on at least one of the
- // sides.
- std::__bitset_partition_partial_blocks<_AlgPolicy, _Compare>(
- __first, __lm1, __comp, __pivot, __left_bitset, __right_bitset);
- // At least one the bitsets would be empty. For the non-empty one, we need to
- // properly partition the elements that appear within that bitset.
- std::__swap_bitmap_pos_within<_AlgPolicy>(__first, __lm1, __left_bitset, __right_bitset);
-
- // Move the pivot to its correct position.
- _RandomAccessIterator __pivot_pos = __first - difference_type(1);
- if (__begin != __pivot_pos) {
- *__begin = _Ops::__iter_move(__pivot_pos);
- }
- *__pivot_pos = std::move(__pivot);
- return std::make_pair(__pivot_pos, __already_partitioned);
-}
-
-// Partition [__first, __last) using the comparator __comp. *__first has the
-// chosen pivot. Elements that are equivalent are kept to the right of the
-// pivot. Returns the iterator for the pivot and a bool value which is true if
-// the provided range is already sorted, false otherwise. We assume that the
-// length of the range is at least three elements.
-template <class _AlgPolicy, class _RandomAccessIterator, class _Compare>
-_LIBCPP_HIDE_FROM_ABI std::pair<_RandomAccessIterator, bool>
-__partition_with_equals_on_right(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) {
- using _Ops = _IterOps<_AlgPolicy>;
- typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- typedef typename std::iterator_traits<_RandomAccessIterator>::value_type value_type;
- _LIBCPP_ASSERT(__last - __first >= difference_type(3), "");
- _RandomAccessIterator __begin = __first;
- value_type __pivot(_Ops::__iter_move(__first));
- // Find the first element greater or equal to the pivot. It will be always
- // guarded because __introsort will do the median-of-three before calling
- // this.
- while (__comp(*++__first, __pivot))
- ;
-
- // Find the last element less than the pivot.
- if (__begin == __first - difference_type(1)) {
- while (__first < __last && !__comp(*--__last, __pivot))
- ;
- } else {
- // Guarded.
- while (!__comp(*--__last, __pivot))
- ;
- }
-
- // If the first element greater than or equal to the pivot is at or after the
- // last element less than the pivot, then we have covered the entire range
- // without swapping elements. This implies the range is already partitioned.
- bool __already_partitioned = __first >= __last;
- // Go through the remaining elements. Swap pairs of elements (one to the
- // right of the pivot and the other to left of the pivot) that are not on the
- // correct side of the pivot.
- while (__first < __last) {
- _Ops::iter_swap(__first, __last);
- while (__comp(*++__first, __pivot))
- ;
- while (!__comp(*--__last, __pivot))
- ;
- }
- // Move the pivot to its correct position.
- _RandomAccessIterator __pivot_pos = __first - difference_type(1);
- if (__begin != __pivot_pos) {
- *__begin = _Ops::__iter_move(__pivot_pos);
- }
- *__pivot_pos = std::move(__pivot);
- return std::make_pair(__pivot_pos, __already_partitioned);
-}
-
-// Similar to the above function. Elements equivalent to the pivot are put to
-// the left of the pivot. Returns the iterator to the pivot element.
-template <class _AlgPolicy, class _RandomAccessIterator, class _Compare>
-_LIBCPP_HIDE_FROM_ABI _RandomAccessIterator
-__partition_with_equals_on_left(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) {
+template <class _AlgPolicy, class _Compare, class _RandomAccessIterator>
+void __introsort(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp,
+ typename iterator_traits<_RandomAccessIterator>::difference_type __depth) {
using _Ops = _IterOps<_AlgPolicy>;
- typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- typedef typename std::iterator_traits<_RandomAccessIterator>::value_type value_type;
- _RandomAccessIterator __begin = __first;
- value_type __pivot(_Ops::__iter_move(__first));
- if (__comp(__pivot, *(__last - difference_type(1)))) {
- // Guarded.
- while (!__comp(__pivot, *++__first)) {
- }
- } else {
- while (++__first < __last && !__comp(__pivot, *__first)) {
- }
- }
-
- if (__first < __last) {
- // It will be always guarded because __introsort will do the
- // median-of-three before calling this.
- while (__comp(__pivot, *--__last)) {
- }
- }
- while (__first < __last) {
- _Ops::iter_swap(__first, __last);
- while (!__comp(__pivot, *++__first))
- ;
- while (__comp(__pivot, *--__last))
- ;
- }
- _RandomAccessIterator __pivot_pos = __first - difference_type(1);
- if (__begin != __pivot_pos) {
- *__begin = _Ops::__iter_move(__pivot_pos);
- }
- *__pivot_pos = std::move(__pivot);
- return __first;
-}
-// The main sorting function. Implements introsort combined with other ideas:
-// - option of using block quick sort for partitioning,
-// - guarded and unguarded insertion sort for small lengths,
-// - Tuckey's ninther technique for computing the pivot,
-// - check on whether partition was not required.
-// The implementation is partly based on Orson Peters' pattern-defeating
-// quicksort, published at: <https://github.com/orlp/pdqsort>.
-template <class _AlgPolicy, class _Compare, class _RandomAccessIterator, bool _UseBitSetPartition>
-void __introsort(_RandomAccessIterator __first,
- _RandomAccessIterator __last,
- _Compare __comp,
- typename iterator_traits<_RandomAccessIterator>::difference_type __depth,
- bool __leftmost = true) {
- using _Ops = _IterOps<_AlgPolicy>;
typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type;
- using _Comp_ref = __comp_ref_type<_Compare>;
- // Upper bound for using insertion sort for sorting.
- _LIBCPP_CONSTEXPR difference_type __limit = 24;
- // Lower bound for using Tuckey's ninther technique for median computation.
- _LIBCPP_CONSTEXPR difference_type __ninther_threshold = 128;
+ typedef typename iterator_traits<_RandomAccessIterator>::value_type value_type;
+ const difference_type __limit =
+ is_trivially_copy_constructible<value_type>::value && is_trivially_copy_assignable<value_type>::value ? 30 : 6;
while (true) {
+ __restart:
difference_type __len = __last - __first;
switch (__len) {
case 0:
@@ -793,7 +446,7 @@ void __introsort(_RandomAccessIterator __first,
return;
case 2:
if (__comp(*--__last, *__first))
- _Ops::iter_swap(__first, __last);
+ _IterOps<_AlgPolicy>::iter_swap(__first, __last);
return;
case 3:
std::__sort3_maybe_branchless<_AlgPolicy, _Compare>(__first, __first + difference_type(1), --__last, __comp);
@@ -808,60 +461,127 @@ void __introsort(_RandomAccessIterator __first,
--__last, __comp);
return;
}
- // Use insertion sort if the length of the range is below the specified limit.
- if (__len < __limit) {
- if (__leftmost) {
- std::__insertion_sort<_AlgPolicy, _Compare>(__first, __last, __comp);
- } else {
- std::__insertion_sort_unguarded<_AlgPolicy, _Compare>(__first, __last, __comp);
- }
+ if (__len <= __limit) {
+ std::__insertion_sort_3<_AlgPolicy, _Compare>(__first, __last, __comp);
return;
}
+ // __len > 5
if (__depth == 0) {
// Fallback to heap sort as Introsort suggests.
std::__partial_sort<_AlgPolicy, _Compare>(__first, __last, __last, __comp);
return;
}
--__depth;
+ _RandomAccessIterator __m = __first;
+ _RandomAccessIterator __lm1 = __last;
+ --__lm1;
+ unsigned __n_swaps;
{
- difference_type __half_len = __len / 2;
- // Use Tuckey's ninther technique or median of 3 for pivot selection
- // depending on the length of the range being sorted.
- if (__len > __ninther_threshold) {
- std::__sort3<_AlgPolicy, _Compare>(__first, __first + __half_len, __last - difference_type(1), __comp);
- std::__sort3<_AlgPolicy, _Compare>(
- __first + difference_type(1), __first + (__half_len - 1), __last - difference_type(2), __comp);
- std::__sort3<_AlgPolicy, _Compare>(
- __first + difference_type(2), __first + (__half_len + 1), __last - difference_type(3), __comp);
- std::__sort3<_AlgPolicy, _Compare>(
- __first + (__half_len - 1), __first + __half_len, __first + (__half_len + 1), __comp);
- _Ops::iter_swap(__first, __first + __half_len);
+ difference_type __delta;
+ if (__len >= 1000) {
+ __delta = __len / 2;
+ __m += __delta;
+ __delta /= 2;
+ __n_swaps = std::__sort5_wrap_policy<_AlgPolicy, _Compare>(
+ __first, __first + __delta, __m, __m + __delta, __lm1, __comp);
} else {
- std::__sort3<_AlgPolicy, _Compare>(__first + __half_len, __first, __last - difference_type(1), __comp);
+ __delta = __len / 2;
+ __m += __delta;
+ __n_swaps = std::__sort3<_AlgPolicy, _Compare>(__first, __m, __lm1, __comp);
}
}
- // The elements to the left of the current iterator range are already
- // sorted. If the current iterator range to be sorted is not the
- // leftmost part of the entire iterator range and the pivot is same as
- // the highest element in the range to the left, then we know that all
- // the elements in the range [first, pivot] would be equal to the pivot,
- // assuming the equal elements are put on the left side when
- // partitioned. This also means that we do not need to sort the left
- // side of the partition.
- if (!__leftmost && !__comp(*(__first - difference_type(1)), *__first)) {
- __first = std::__partition_with_equals_on_left<_AlgPolicy, _RandomAccessIterator, _Comp_ref>(
- __first, __last, _Comp_ref(__comp));
- continue;
+ // *__m is median
+ // partition [__first, __m) < *__m and *__m <= [__m, __last)
+ // (this inhibits tossing elements equivalent to __m around unnecessarily)
+ _RandomAccessIterator __i = __first;
+ _RandomAccessIterator __j = __lm1;
+ // j points beyond range to be tested, *__m is known to be <= *__lm1
+ // The search going up is known to be guarded but the search coming down isn't.
+ // Prime the downward search with a guard.
+ if (!__comp(*__i, *__m)) // if *__first == *__m
+ {
+ // *__first == *__m, *__first doesn't go in first part
+ // manually guard downward moving __j against __i
+ while (true) {
+ if (__i == --__j) {
+ // *__first == *__m, *__m <= all other elements
+          // Partition instead into [__first, __i) == *__first and *__first < [__i, __last)
+ ++__i; // __first + 1
+ __j = __last;
+ if (!__comp(*__first, *--__j)) // we need a guard if *__first == *(__last-1)
+ {
+ while (true) {
+ if (__i == __j)
+ return; // [__first, __last) all equivalent elements
+ if (__comp(*__first, *__i)) {
+ _Ops::iter_swap(__i, __j);
+ ++__n_swaps;
+ ++__i;
+ break;
+ }
+ ++__i;
+ }
+ }
+ // [__first, __i) == *__first and *__first < [__j, __last) and __j == __last - 1
+ if (__i == __j)
+ return;
+ while (true) {
+ while (!__comp(*__first, *__i))
+ ++__i;
+ while (__comp(*__first, *--__j))
+ ;
+ if (__i >= __j)
+ break;
+ _Ops::iter_swap(__i, __j);
+ ++__n_swaps;
+ ++__i;
+ }
+ // [__first, __i) == *__first and *__first < [__i, __last)
+ // The first part is sorted, sort the second part
+ // std::__sort<_Compare>(__i, __last, __comp);
+ __first = __i;
+ goto __restart;
+ }
+ if (__comp(*__j, *__m)) {
+ _Ops::iter_swap(__i, __j);
+ ++__n_swaps;
+ break; // found guard for downward moving __j, now use unguarded partition
+ }
+ }
+ }
+ // It is known that *__i < *__m
+ ++__i;
+ // j points beyond range to be tested, *__m is known to be <= *__lm1
+ // if not yet partitioned...
+ if (__i < __j) {
+ // known that *(__i - 1) < *__m
+ // known that __i <= __m
+ while (true) {
+ // __m still guards upward moving __i
+ while (__comp(*__i, *__m))
+ ++__i;
+ // It is now known that a guard exists for downward moving __j
+ while (!__comp(*--__j, *__m))
+ ;
+ if (__i > __j)
+ break;
+ _Ops::iter_swap(__i, __j);
+ ++__n_swaps;
+ // It is known that __m != __j
+ // If __m just moved, follow it
+ if (__m == __i)
+ __m = __j;
+ ++__i;
+ }
+ }
+ // [__first, __i) < *__m and *__m <= [__i, __last)
+ if (__i != __m && __comp(*__m, *__i)) {
+ _Ops::iter_swap(__i, __m);
+ ++__n_swaps;
}
- // Use bitset partition only if asked for.
- auto __ret =
- _UseBitSetPartition
- ? std::__bitset_partition<_AlgPolicy, _RandomAccessIterator, _Compare>(__first, __last, __comp)
- : std::__partition_with_equals_on_right<_AlgPolicy, _RandomAccessIterator, _Compare>(__first, __last, __comp);
- _RandomAccessIterator __i = __ret.first;
// [__first, __i) < *__i and *__i <= [__i+1, __last)
// If we were given a perfect partition, see if insertion sort is quick...
- if (__ret.second) {
+ if (__n_swaps == 0) {
using _WrappedComp = typename _WrapAlgPolicy<_AlgPolicy, _Compare>::type;
_WrappedComp __wrapped_comp(__comp);
bool __fs = std::__insertion_sort_incomplete<_WrappedComp>(__first, __i, __wrapped_comp);
@@ -877,11 +597,14 @@ void __introsort(_RandomAccessIterator __first,
}
}
}
- // Sort the left partiton recursively and the right partition with tail recursion elimination.
- std::__introsort<_AlgPolicy, _Compare, _RandomAccessIterator, _UseBitSetPartition>(
- __first, __i, __comp, __depth, __leftmost);
- __leftmost = false;
- __first = ++__i;
+ // sort smaller range with recursive call and larger with tail recursion elimination
+ if (__i - __first < __last - __i) {
+ std::__introsort<_AlgPolicy, _Compare>(__first, __i, __comp, __depth);
+ __first = ++__i;
+ } else {
+ std::__introsort<_AlgPolicy, _Compare>(__i + difference_type(1), __last, __comp, __depth);
+ __last = __i;
+ }
}
}
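The restored tail of __introsort recurses only into the smaller partition and loops on the larger one, which bounds the recursion depth at O(log n) regardless of pivot quality, while the __depth counter provides the heap-sort escape hatch that gives introsort its O(n log n) guarantee. A compact, self-contained sketch of that control flow (not the libc++ implementation; partitioning here uses std::partition for brevity):

#include <algorithm>

// Call as: introsort(v.begin(), v.end(), std::less<>{}, 2 * ceil(log2(v.size())))
template <class It, class Compare>
void introsort(It first, It last, Compare comp, int depth) {
  while (last - first > 16) {
    if (depth-- == 0) {                       // too many bad splits: fall back to heap sort
      std::partial_sort(first, last, last, comp);
      return;
    }
    It mid = first + (last - first) / 2;      // median-of-3 pivot selection
    if (comp(*mid, *first)) std::iter_swap(mid, first);
    if (comp(*(last - 1), *mid)) std::iter_swap(last - 1, mid);
    if (comp(*mid, *first)) std::iter_swap(mid, first);
    auto pivot = *mid;
    It lo = std::partition(first, last, [&](const auto& x) { return comp(x, pivot); });
    It hi = std::partition(lo, last, [&](const auto& x) { return !comp(pivot, x); });
    // [first, lo) < pivot, [lo, hi) == pivot, [hi, last) > pivot
    if (lo - first < last - hi) {
      introsort(first, lo, comp, depth);      // recurse into the smaller side
      first = hi;                             // tail-loop on the larger side
    } else {
      introsort(hi, last, comp, depth);
      last = lo;
    }
  }
  for (It i = first; i != last; ++i)          // small ranges: insertion sort
    std::rotate(std::upper_bound(first, i, *i, comp), i, i + 1);
}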
@@ -913,14 +636,7 @@ _LIBCPP_HIDDEN void __sort(_RandomAccessIterator __first, _RandomAccessIterator
using _AlgPolicy = typename _Unwrap::_AlgPolicy;
using _Compare = typename _Unwrap::_Comp;
_Compare __comp = _Unwrap::__get_comp(__wrapped_comp);
- // Only use bitset partitioning for arithmetic types. We should also check
- // that the default comparator is in use so that we are sure that there are no
- // branches in the comparator.
- std::__introsort<_AlgPolicy,
- _Compare,
- _RandomAccessIterator,
- __use_branchless_sort<_Compare, _RandomAccessIterator>::value>(
- __first, __last, __comp, __depth_limit);
+ std::__introsort<_AlgPolicy, _Compare>(__first, __last, __comp, __depth_limit);
}
template <class _Compare, class _Tp>
diff --git a/contrib/llvm-project/libcxx/include/__config b/contrib/llvm-project/libcxx/include/__config
index b9203e640a3e..581ada45b3f0 100644
--- a/contrib/llvm-project/libcxx/include/__config
+++ b/contrib/llvm-project/libcxx/include/__config
@@ -23,6 +23,7 @@
#endif
#if defined(__apple_build_version__)
+// Given AppleClang XX.Y.Z, _LIBCPP_APPLE_CLANG_VER is XXYZ (e.g. AppleClang 14.0.3 => 1403)
# define _LIBCPP_COMPILER_CLANG_BASED
# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000)
#elif defined(__clang__)
@@ -37,7 +38,7 @@
// _LIBCPP_VERSION represents the version of libc++, which matches the version of LLVM.
// Given a LLVM release LLVM XX.YY.ZZ (e.g. LLVM 16.0.1 == 16.00.01), _LIBCPP_VERSION is
// defined to XXYYZZ.
-# define _LIBCPP_VERSION 160000
+# define _LIBCPP_VERSION 160001
# define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y
# define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y)
@@ -134,6 +135,15 @@
# define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON
// According to the Standard, `bitset::operator[] const` returns bool
# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL
+// Fix the implementation of CityHash used for std::hash<fundamental-type>.
+// This is an ABI break because `std::hash` will return a different result,
+// which means that hashing the same object in translation units built against
+// different versions of libc++ can return inconsistent results. This is especially
+// tricky since std::hash is used in the implementation of unordered containers.
+//
+// The incorrect implementation of CityHash has the problem that it drops some
+// bits on the floor.
+# define _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION
// Remove the base 10 implementation of std::to_chars from the dylib.
// The implementation moved to the header, but we still export the symbols from
// the dylib for backwards compatibility.
@@ -629,7 +639,11 @@ typedef __char32_t char32_t;
# else
# define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION
# endif
-# define _LIBCPP_HIDE_FROM_ABI_VIRTUAL _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION
+# define _LIBCPP_HIDE_FROM_ABI_VIRTUAL _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION
+
+// This macro provides a HIDE_FROM_ABI equivalent that can be applied to extern
+// "C" function, as those lack mangling.
+# define _LIBCPP_HIDE_FROM_ABI_C _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION
# ifdef _LIBCPP_BUILDING_LIBRARY
# if _LIBCPP_ABI_VERSION > 1
@@ -891,7 +905,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
// Try to find out if RTTI is disabled.
# if !defined(__cpp_rtti) || __cpp_rtti < 199711L
-# define _LIBCPP_NO_RTTI
+# define _LIBCPP_HAS_NO_RTTI
# endif
# ifndef _LIBCPP_WEAK
@@ -1223,12 +1237,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
// functions are declared by the C library.
# define _LIBCPP_HAS_NO_C8RTOMB_MBRTOC8
// GNU libc 2.36 and newer declare c8rtomb() and mbrtoc8() in C++ modes if
-// __cpp_char8_t is defined or if C2X extensions are enabled. Unfortunately,
-// determining the latter depends on internal GNU libc details. If the
-// __cpp_char8_t feature test macro is not defined, then a char8_t typedef
-// will be declared as well.
-# if defined(_LIBCPP_GLIBC_PREREQ) && defined(__GLIBC_USE)
-# if _LIBCPP_GLIBC_PREREQ(2, 36) && (defined(__cpp_char8_t) || __GLIBC_USE(ISOC2X))
+// __cpp_char8_t is defined or if C2X extensions are enabled. Determining
+// the latter depends on internal GNU libc details that are not appropriate
+// to depend on here, so any declarations present when __cpp_char8_t is not
+// defined are ignored.
+# if defined(_LIBCPP_GLIBC_PREREQ)
+# if _LIBCPP_GLIBC_PREREQ(2, 36) && defined(__cpp_char8_t)
# undef _LIBCPP_HAS_NO_C8RTOMB_MBRTOC8
# endif
# endif
@@ -1246,6 +1260,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_END_NAMESPACE_STD
# define _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(_ClassName) static_assert(true, "")
#endif
+// TODO(varconst): currently, there are bugs in Clang's intrinsics when handling Objective-C++ `id`, so don't use
+// compiler intrinsics in the Objective-C++ mode.
+# ifdef __OBJC__
+# define _LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS
+# endif
+
#endif // __cplusplus
#endif // _LIBCPP___CONFIG
diff --git a/contrib/llvm-project/libcxx/include/__expected/expected.h b/contrib/llvm-project/libcxx/include/__expected/expected.h
index e1f590c65efe..ca3e8a59922d 100644
--- a/contrib/llvm-project/libcxx/include/__expected/expected.h
+++ b/contrib/llvm-project/libcxx/include/__expected/expected.h
@@ -292,7 +292,8 @@ private:
"be reverted to the previous state in case an exception is thrown during the assignment.");
_T2 __tmp(std::move(__oldval));
std::destroy_at(std::addressof(__oldval));
- __exception_guard __trans([&] { std::construct_at(std::addressof(__oldval), std::move(__tmp)); });
+ auto __trans =
+ std::__make_exception_guard([&] { std::construct_at(std::addressof(__oldval), std::move(__tmp)); });
std::construct_at(std::addressof(__newval), std::forward<_Args>(__args)...);
__trans.__complete();
}
@@ -451,7 +452,7 @@ public:
if constexpr (is_nothrow_move_constructible_v<_Err>) {
_Err __tmp(std::move(__with_err.__union_.__unex_));
std::destroy_at(std::addressof(__with_err.__union_.__unex_));
- __exception_guard __trans([&] {
+ auto __trans = std::__make_exception_guard([&] {
std::construct_at(std::addressof(__with_err.__union_.__unex_), std::move(__tmp));
});
std::construct_at(std::addressof(__with_err.__union_.__val_), std::move(__with_val.__union_.__val_));
@@ -464,7 +465,7 @@ public:
"that it can be reverted to the previous state in case an exception is thrown during swap.");
_Tp __tmp(std::move(__with_val.__union_.__val_));
std::destroy_at(std::addressof(__with_val.__union_.__val_));
- __exception_guard __trans([&] {
+ auto __trans = std::__make_exception_guard([&] {
std::construct_at(std::addressof(__with_val.__union_.__val_), std::move(__tmp));
});
std::construct_at(std::addressof(__with_val.__union_.__unex_), std::move(__with_err.__union_.__unex_));
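The change is mechanical (the rollback guard is now obtained from the std::__make_exception_guard factory so its callable type is deduced), but it is a good place to spell out the pattern: destroy the old object, arm a guard that reconstructs it if the next construction throws, then disarm the guard on success. A stripped-down version of such a guard, as an illustration only (not libc++'s type):

#include <memory>
#include <utility>

template <class Rollback>
struct scope_rollback {
  Rollback rollback;
  bool armed = true;
  ~scope_rollback() { if (armed) rollback(); }
  void complete() { armed = false; }  // call after the risky step succeeds
};

template <class Rollback>
scope_rollback<Rollback> make_scope_rollback(Rollback r) { return {std::move(r)}; }

// Usage mirroring the swap code path above:
//   T2 old = std::move(oldval);  std::destroy_at(std::addressof(oldval));
//   auto guard = make_scope_rollback([&] { std::construct_at(std::addressof(oldval), std::move(old)); });
//   std::construct_at(std::addressof(newval), ...);  // may throw; guard restores oldval if it does
//   guard.complete();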
diff --git a/contrib/llvm-project/libcxx/include/__format/concepts.h b/contrib/llvm-project/libcxx/include/__format/concepts.h
index fe4a7b9625ce..ba8d8e316226 100644
--- a/contrib/llvm-project/libcxx/include/__format/concepts.h
+++ b/contrib/llvm-project/libcxx/include/__format/concepts.h
@@ -66,9 +66,8 @@ concept formattable = __formattable<_Tp, _CharT>;
// TODO FMT Add a test to validate we fail when using that concept after P2165
// has been implemented.
template <class _Tp>
-concept __fmt_pair_like = __is_specialization_v<_Tp, pair> ||
- // Use a requires since tuple_size_v may fail to instantiate,
- (__is_specialization_v<_Tp, tuple> && requires { tuple_size_v<_Tp> == 2; });
+concept __fmt_pair_like =
+ __is_specialization_v<_Tp, pair> || (__is_specialization_v<_Tp, tuple> && tuple_size_v<_Tp> == 2);
# endif //_LIBCPP_STD_VER > 20
#endif //_LIBCPP_STD_VER > 17
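The tightened concept now actually evaluates the tuple arity: the old requires-expression only checked that `tuple_size_v<_Tp> == 2` was a well-formed expression, whereas the new form requires the value to hold, and it stays safe to instantiate because the conjunction short-circuits on the is-specialization check. A standalone C++20 illustration of the same shape (helper names are mine, not libc++'s):

#include <tuple>
#include <utility>

template <class T, template <class...> class Template>
inline constexpr bool is_specialization_v = false;
template <template <class...> class Template, class... Args>
inline constexpr bool is_specialization_v<Template<Args...>, Template> = true;

template <class T>
concept pair_like_demo =
    is_specialization_v<T, std::pair> ||
    (is_specialization_v<T, std::tuple> && std::tuple_size_v<T> == 2);

static_assert(pair_like_demo<std::pair<int, char>>);
static_assert(pair_like_demo<std::tuple<int, char>>);
static_assert(!pair_like_demo<std::tuple<int, char, bool>>);
static_assert(!pair_like_demo<int>);  // short-circuit: tuple_size_v<int> is never instantiated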
diff --git a/contrib/llvm-project/libcxx/include/__format/format_functions.h b/contrib/llvm-project/libcxx/include/__format/format_functions.h
index 185148ccba53..0f0001272d40 100644
--- a/contrib/llvm-project/libcxx/include/__format/format_functions.h
+++ b/contrib/llvm-project/libcxx/include/__format/format_functions.h
@@ -258,10 +258,12 @@ __handle_replacement_field(const _CharT* __begin, const _CharT* __end,
if constexpr (same_as<_Ctx, __compile_time_basic_format_context<_CharT>>) {
__arg_t __type = __ctx.arg(__r.__value);
- if (__type == __arg_t::__handle)
+ if (__type == __arg_t::__none)
+ std::__throw_format_error("Argument index out of bounds");
+ else if (__type == __arg_t::__handle)
__ctx.__handle(__r.__value).__parse(__parse_ctx);
- else
- __format::__compile_time_visit_format_arg(__parse_ctx, __ctx, __type);
+ else if (__parse)
+ __format::__compile_time_visit_format_arg(__parse_ctx, __ctx, __type);
} else
_VSTD::__visit_format_arg(
[&](auto __arg) {
diff --git a/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h b/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h
index a544b53f7e6d..ca065723e198 100644
--- a/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h
+++ b/contrib/llvm-project/libcxx/include/__format/formatter_floating_point.h
@@ -404,7 +404,6 @@ _LIBCPP_HIDE_FROM_ABI __float_result __format_buffer_general_lower_case(__float_
// In fixed mode the algorithm truncates trailing spaces and possibly the
// radix point. There's no good guess for the position of the radix point
// therefore scan the output after the first digit.
-
__result.__radix_point = _VSTD::find(__first, __result.__last, '.');
}
}
@@ -665,7 +664,7 @@ __format_floating_point(_Tp __value, auto& __ctx, __format_spec::__parsed_specif
if (__result.__exponent == __result.__last)
// if P > X >= -4, the conversion is with style f or F and precision P - 1 - X.
// By including the radix point it calculates P - (1 + X)
- __p -= __result.__radix_point - __buffer.begin();
+ __p -= __result.__radix_point - __result.__integral;
else
// otherwise, the conversion is with style e or E and precision P - 1.
--__p;
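The precision fix follows the C/C++ rule for the general ("g") style that the comments cite: with precision P and decimal exponent X, if P > X >= -4 the value is formatted in fixed style with precision P - 1 - X, otherwise in scientific style with precision P - 1, with trailing zeros removed. A quick worked check against printf, which applies the same rule:

#include <cstdio>

int main() {
  // 0.0001234 is 1.234e-04, so X = -4. With P = 6: P > X >= -4,
  // hence style f with precision 6 - 1 - (-4) = 9, then trailing zeros are dropped.
  std::printf("%.6g\n", 0.0001234);  // prints 0.0001234
  // 1234567.0 has X = 6. With P = 6: P <= X, hence style e with precision 5.
  std::printf("%.6g\n", 1234567.0);  // prints 1.23457e+06
}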
diff --git a/contrib/llvm-project/libcxx/include/__functional/function.h b/contrib/llvm-project/libcxx/include/__functional/function.h
index ca79d334a028..9f92f6181468 100644
--- a/contrib/llvm-project/libcxx/include/__functional/function.h
+++ b/contrib/llvm-project/libcxx/include/__functional/function.h
@@ -268,10 +268,10 @@ public:
virtual void destroy() _NOEXCEPT = 0;
virtual void destroy_deallocate() _NOEXCEPT = 0;
virtual _Rp operator()(_ArgTypes&& ...) = 0;
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
virtual const void* target(const type_info&) const _NOEXCEPT = 0;
virtual const std::type_info& target_type() const _NOEXCEPT = 0;
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
// __func implements __base for a given functor type.
@@ -305,10 +305,10 @@ public:
virtual void destroy() _NOEXCEPT;
virtual void destroy_deallocate() _NOEXCEPT;
virtual _Rp operator()(_ArgTypes&&... __arg);
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
virtual const void* target(const type_info&) const _NOEXCEPT;
virtual const std::type_info& target_type() const _NOEXCEPT;
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
@@ -356,7 +356,7 @@ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::operator()(_ArgTypes&& ... __arg)
return __f_(_VSTD::forward<_ArgTypes>(__arg)...);
}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
template<class _Fp, class _Alloc, class _Rp, class ..._ArgTypes>
const void*
@@ -374,7 +374,7 @@ __func<_Fp, _Alloc, _Rp(_ArgTypes...)>::target_type() const _NOEXCEPT
return typeid(_Fp);
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
// __value_func creates a value-type from a __func.
@@ -553,7 +553,7 @@ template <class _Rp, class... _ArgTypes> class __value_func<_Rp(_ArgTypes...)>
_LIBCPP_INLINE_VISIBILITY
explicit operator bool() const _NOEXCEPT { return __f_ != nullptr; }
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
_LIBCPP_INLINE_VISIBILITY
const std::type_info& target_type() const _NOEXCEPT
{
@@ -569,7 +569,7 @@ template <class _Rp, class... _ArgTypes> class __value_func<_Rp(_ArgTypes...)>
return nullptr;
return (const _Tp*)__f_->target(typeid(_Tp));
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
// Storage for a functor object, to be used with __policy to manage copy and
@@ -616,7 +616,7 @@ struct __policy
{
static const _LIBCPP_CONSTEXPR __policy __policy_ = {nullptr, nullptr,
true,
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
&typeid(void)
#else
nullptr
@@ -642,7 +642,7 @@ struct __policy
__choose_policy(/* is_small = */ false_type) {
static const _LIBCPP_CONSTEXPR __policy __policy_ = {
&__large_clone<_Fun>, &__large_destroy<_Fun>, false,
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
&typeid(typename _Fun::_Target)
#else
nullptr
@@ -657,7 +657,7 @@ struct __policy
{
static const _LIBCPP_CONSTEXPR __policy __policy_ = {
nullptr, nullptr, false,
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
&typeid(typename _Fun::_Target)
#else
nullptr
@@ -861,7 +861,7 @@ template <class _Rp, class... _ArgTypes> class __policy_func<_Rp(_ArgTypes...)>
return !__policy_->__is_null;
}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
_LIBCPP_INLINE_VISIBILITY
const std::type_info& target_type() const _NOEXCEPT
{
@@ -878,7 +878,7 @@ template <class _Rp, class... _ArgTypes> class __policy_func<_Rp(_ArgTypes...)>
else
return reinterpret_cast<const _Tp*>(&__buf_.__small);
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
#if defined(_LIBCPP_HAS_BLOCKS_RUNTIME)
@@ -945,7 +945,7 @@ public:
return _VSTD::__invoke(__f_, _VSTD::forward<_ArgTypes>(__arg)...);
}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
virtual const void* target(type_info const& __ti) const _NOEXCEPT {
if (__ti == typeid(__func::__block_type))
return &__f_;
@@ -955,7 +955,7 @@ public:
virtual const std::type_info& target_type() const _NOEXCEPT {
return typeid(__func::__block_type);
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
#endif // _LIBCPP_HAS_EXTENSION_BLOCKS
@@ -1056,12 +1056,12 @@ public:
// function invocation:
_Rp operator()(_ArgTypes...) const;
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
// function target access:
const std::type_info& target_type() const _NOEXCEPT;
template <typename _Tp> _Tp* target() _NOEXCEPT;
template <typename _Tp> const _Tp* target() const _NOEXCEPT;
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
};
#if _LIBCPP_STD_VER >= 17
@@ -1156,7 +1156,7 @@ function<_Rp(_ArgTypes...)>::operator()(_ArgTypes... __arg) const
return __f_(_VSTD::forward<_ArgTypes>(__arg)...);
}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
template<class _Rp, class ..._ArgTypes>
const std::type_info&
@@ -1181,7 +1181,7 @@ function<_Rp(_ArgTypes...)>::target() const _NOEXCEPT
return __f_.template target<_Tp>();
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
template <class _Rp, class... _ArgTypes>
inline _LIBCPP_INLINE_VISIBILITY
diff --git a/contrib/llvm-project/libcxx/include/__functional/hash.h b/contrib/llvm-project/libcxx/include/__functional/hash.h
index dfd8ea2553dc..39382aa9bff4 100644
--- a/contrib/llvm-project/libcxx/include/__functional/hash.h
+++ b/contrib/llvm-project/libcxx/include/__functional/hash.h
@@ -140,7 +140,11 @@ struct __murmur2_or_cityhash<_Size, 64>
if (__len >= 4) {
const uint32_t __a = std::__loadword<uint32_t>(__s);
const uint32_t __b = std::__loadword<uint32_t>(__s + __len - 4);
+#ifdef _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION
return __hash_len_16(__len + (static_cast<_Size>(__a) << 3), __b);
+#else
+ return __hash_len_16(__len + (__a << 3), __b);
+#endif
}
if (__len > 0) {
const unsigned char __a = static_cast<unsigned char>(__s[0]);
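The guarded change is exactly the dropped-bits problem the new __config comment describes: __a is a uint32_t, so __a << 3 is evaluated in 32 bits and the top three bits are lost before the result is widened to the 64-bit _Size; casting first preserves them. A two-assert demonstration (assuming the usual 32-bit unsigned int):

#include <cstdint>

constexpr std::uint32_t a = 0xFFFFFFFFu;
static_assert(static_cast<std::uint64_t>(a << 3) == 0xFFFFFFF8u);       // shift in 32 bits: high bits dropped
static_assert((static_cast<std::uint64_t>(a) << 3) == 0x7FFFFFFF8ull);  // widen first: all bits kept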
diff --git a/contrib/llvm-project/libcxx/include/__memory/construct_at.h b/contrib/llvm-project/libcxx/include/__memory/construct_at.h
index ffee0022c243..14484dd6aa0d 100644
--- a/contrib/llvm-project/libcxx/include/__memory/construct_at.h
+++ b/contrib/llvm-project/libcxx/include/__memory/construct_at.h
@@ -83,6 +83,16 @@ _ForwardIterator __destroy(_ForwardIterator __first, _ForwardIterator __last) {
return __first;
}
+template <class _BidirectionalIterator>
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
+_BidirectionalIterator __reverse_destroy(_BidirectionalIterator __first, _BidirectionalIterator __last) {
+ while (__last != __first) {
+ --__last;
+ std::__destroy_at(std::addressof(*__last));
+ }
+ return __last;
+}
+
#if _LIBCPP_STD_VER > 14
template <class _Tp, enable_if_t<!is_array_v<_Tp>, int> = 0>
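The new __reverse_destroy helper tears an array down from the last element to the first, matching the usual C++ rule that objects are destroyed in reverse order of construction; the for-overwrite array control block further down relies on it. A usage-level illustration of that ordering (my own toy type, not libc++ code):

#include <cstdio>
#include <memory>

struct Noisy {
  int id;
  explicit Noisy(int i) : id(i) {}
  ~Noisy() { std::printf("destroying %d\n", id); }
};

int main() {
  std::allocator<Noisy> alloc;
  Noisy* p = alloc.allocate(3);
  for (int i = 0; i < 3; ++i)
    std::construct_at(p + i, i);      // constructed in order 0, 1, 2
  for (Noisy* it = p + 3; it != p; )  // destroyed in order 2, 1, 0 -- what __reverse_destroy does
    std::destroy_at(--it);
  alloc.deallocate(p, 3);
}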
diff --git a/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h b/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
index b77ce9230bff..f0ffa26abfeb 100644
--- a/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
+++ b/contrib/llvm-project/libcxx/include/__memory/shared_ptr.h
@@ -219,7 +219,7 @@ public:
__shared_ptr_pointer(_Tp __p, _Dp __d, _Alloc __a)
: __data_(__compressed_pair<_Tp, _Dp>(__p, _VSTD::move(__d)), _VSTD::move(__a)) {}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
const void* __get_deleter(const type_info&) const _NOEXCEPT override;
#endif
@@ -228,7 +228,7 @@ private:
void __on_zero_shared_weak() _NOEXCEPT override;
};
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
template <class _Tp, class _Dp, class _Alloc>
const void*
@@ -237,7 +237,7 @@ __shared_ptr_pointer<_Tp, _Dp, _Alloc>::__get_deleter(const type_info& __t) cons
return __t == typeid(_Dp) ? _VSTD::addressof(__data_.first().second()) : nullptr;
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
template <class _Tp, class _Dp, class _Alloc>
void
@@ -260,7 +260,10 @@ __shared_ptr_pointer<_Tp, _Dp, _Alloc>::__on_zero_shared_weak() _NOEXCEPT
__a.deallocate(_PTraits::pointer_to(*this), 1);
}
-struct __default_initialize_tag {};
+// This tag is used to instantiate an allocator type. The various shared_ptr control blocks
+// detect that the allocator has been instantiated for this type and perform alternative
+// initialization/destruction based on that.
+struct __for_overwrite_tag {};
template <class _Tp, class _Alloc>
struct __shared_ptr_emplace
@@ -271,25 +274,20 @@ struct __shared_ptr_emplace
explicit __shared_ptr_emplace(_Alloc __a, _Args&& ...__args)
: __storage_(_VSTD::move(__a))
{
-#if _LIBCPP_STD_VER > 17
- using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
- _TpAlloc __tmp(*__get_alloc());
- allocator_traits<_TpAlloc>::construct(__tmp, __get_elem(), _VSTD::forward<_Args>(__args)...);
+#if _LIBCPP_STD_VER >= 20
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ static_assert(sizeof...(_Args) == 0, "No argument should be provided to the control block when using _for_overwrite");
+ ::new ((void*)__get_elem()) _Tp;
+ } else {
+ using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
+ _TpAlloc __tmp(*__get_alloc());
+ allocator_traits<_TpAlloc>::construct(__tmp, __get_elem(), _VSTD::forward<_Args>(__args)...);
+ }
#else
::new ((void*)__get_elem()) _Tp(_VSTD::forward<_Args>(__args)...);
#endif
}
-
-#if _LIBCPP_STD_VER >= 20
- _LIBCPP_HIDE_FROM_ABI
- explicit __shared_ptr_emplace(__default_initialize_tag, _Alloc __a)
- : __storage_(std::move(__a))
- {
- ::new ((void*)__get_elem()) _Tp;
- }
-#endif
-
_LIBCPP_HIDE_FROM_ABI
_Alloc* __get_alloc() _NOEXCEPT { return __storage_.__get_alloc(); }
@@ -299,9 +297,13 @@ struct __shared_ptr_emplace
private:
void __on_zero_shared() _NOEXCEPT override {
#if _LIBCPP_STD_VER > 17
- using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
- _TpAlloc __tmp(*__get_alloc());
- allocator_traits<_TpAlloc>::destroy(__tmp, __get_elem());
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ __get_elem()->~_Tp();
+ } else {
+ using _TpAlloc = typename __allocator_traits_rebind<_Alloc, _Tp>::type;
+ _TpAlloc __tmp(*__get_alloc());
+ allocator_traits<_TpAlloc>::destroy(__tmp, __get_elem());
+ }
#else
__get_elem()->~_Tp();
#endif
@@ -367,13 +369,57 @@ public:
template<class _Tp> class _LIBCPP_TEMPLATE_VIS enable_shared_from_this;
-template<class _Tp, class _Up>
+// http://eel.is/c++draft/util.sharedptr#util.smartptr.shared.general-6
+// A pointer type Y* is said to be compatible with a pointer type T*
+// when either Y* is convertible to T* or Y is U[N] and T is cv U[].
+#if _LIBCPP_STD_VER >= 17
+template <class _Yp, class _Tp>
+struct __bounded_convertible_to_unbounded : false_type {};
+
+template <class _Up, std::size_t _Np, class _Tp>
+struct __bounded_convertible_to_unbounded<_Up[_Np], _Tp>
+ : is_same<__remove_cv_t<_Tp>, _Up[]> {};
+
+template <class _Yp, class _Tp>
struct __compatible_with
-#if _LIBCPP_STD_VER > 14
- : is_convertible<remove_extent_t<_Tp>*, remove_extent_t<_Up>*> {};
+ : _Or<
+ is_convertible<_Yp*, _Tp*>,
+ __bounded_convertible_to_unbounded<_Yp, _Tp>
+ > {};
#else
- : is_convertible<_Tp*, _Up*> {};
-#endif // _LIBCPP_STD_VER > 14
+template <class _Yp, class _Tp>
+struct __compatible_with
+ : is_convertible<_Yp*, _Tp*> {};
+#endif // _LIBCPP_STD_VER >= 17
+
+// Constructors that take raw pointers have a different set of "compatible" constraints
+// http://eel.is/c++draft/util.sharedptr#util.smartptr.shared.const-9.1
+// - If T is an array type, then either T is U[N] and Y(*)[N] is convertible to T*,
+// or T is U[] and Y(*)[] is convertible to T*.
+// - If T is not an array type, then Y* is convertible to T*.
+#if _LIBCPP_STD_VER >= 17
+template <class _Yp, class _Tp, class = void>
+struct __raw_pointer_compatible_with : _And<
+ _Not<is_array<_Tp>>,
+ is_convertible<_Yp*, _Tp*>
+ > {};
+
+template <class _Yp, class _Up, std::size_t _Np>
+struct __raw_pointer_compatible_with<_Yp, _Up[_Np], __enable_if_t<
+ is_convertible<_Yp(*)[_Np], _Up(*)[_Np]>::value> >
+ : true_type {};
+
+template <class _Yp, class _Up>
+struct __raw_pointer_compatible_with<_Yp, _Up[], __enable_if_t<
+ is_convertible<_Yp(*)[], _Up(*)[]>::value> >
+ : true_type {};
+
+#else
+template <class _Yp, class _Tp>
+struct __raw_pointer_compatible_with
+ : is_convertible<_Yp*, _Tp*> {};
+#endif // _LIBCPP_STD_VER >= 17
+
template <class _Ptr, class = void>
struct __is_deletable : false_type { };
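The reworked __compatible_with encodes the quoted [util.smartptr.shared.general]/6 rule: Y* is compatible with T* when Y* converts to T*, or Y is U[N] and T is cv U[]. In user-facing terms, that is what lets a shared_ptr to a bounded array convert to one of unbounded array of the same element type, while raw-pointer constructors use the stricter __raw_pointer_compatible_with rule added just below. A C++20-level illustration:

#include <memory>

int main() {
  std::shared_ptr<int[5]> bounded = std::make_shared<int[5]>();
  std::shared_ptr<int[]> unbounded = bounded;         // OK: int[5] is compatible with int[]
  std::shared_ptr<const int[]> cunbounded = bounded;  // OK: cv-qualification on U[] is allowed
  // std::shared_ptr<long[]> wrong = bounded;         // ill-formed: element types differ
  (void)unbounded; (void)cunbounded;
}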
@@ -395,12 +441,12 @@ static false_type __well_formed_deleter_test(...);
template <class _Dp, class _Pt>
struct __well_formed_deleter : decltype(std::__well_formed_deleter_test<_Dp, _Pt>(0)) {};
-template<class _Dp, class _Tp, class _Yp>
+template<class _Dp, class _Yp, class _Tp>
struct __shared_ptr_deleter_ctor_reqs
{
- static const bool value = __compatible_with<_Tp, _Yp>::value &&
+ static const bool value = __raw_pointer_compatible_with<_Yp, _Tp>::value &&
is_move_constructible<_Dp>::value &&
- __well_formed_deleter<_Dp, _Tp*>::value;
+ __well_formed_deleter<_Dp, _Yp*>::value;
};
#if defined(_LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI)
@@ -439,7 +485,7 @@ public:
template<class _Yp, class = __enable_if_t<
_And<
- __compatible_with<_Yp, _Tp>
+ __raw_pointer_compatible_with<_Yp, _Tp>
// In C++03 we get errors when trying to do SFINAE with the
// delete operator, so we always pretend that it's deletable.
// The same happens on GCC.
@@ -457,7 +503,7 @@ public:
__enable_weak_this(__p, __p);
}
- template<class _Yp, class _Dp, class = __enable_if_t<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value> >
+ template<class _Yp, class _Dp, class = __enable_if_t<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, _Tp>::value> >
_LIBCPP_HIDE_FROM_ABI
shared_ptr(_Yp* __p, _Dp __d)
: __ptr_(__p)
@@ -484,7 +530,7 @@ public:
#endif // _LIBCPP_NO_EXCEPTIONS
}
- template<class _Yp, class _Dp, class _Alloc, class = __enable_if_t<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, element_type>::value> >
+ template<class _Yp, class _Dp, class _Alloc, class = __enable_if_t<__shared_ptr_deleter_ctor_reqs<_Dp, _Yp, _Tp>::value> >
_LIBCPP_HIDE_FROM_ABI
shared_ptr(_Yp* __p, _Dp __d, _Alloc __a)
: __ptr_(__p)
@@ -646,6 +692,7 @@ public:
template <class _Yp, class _Dp, class = __enable_if_t<
!is_lvalue_reference<_Dp>::value &&
+ __compatible_with<_Yp, _Tp>::value &&
is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value
> >
_LIBCPP_HIDE_FROM_ABI
@@ -668,6 +715,7 @@ public:
template <class _Yp, class _Dp, class = void, class = __enable_if_t<
is_lvalue_reference<_Dp>::value &&
+ __compatible_with<_Yp, _Tp>::value &&
is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value
> >
_LIBCPP_HIDE_FROM_ABI
@@ -740,9 +788,10 @@ public:
}
#endif
- template <class _Yp, class _Dp, class = __enable_if_t<
- is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value
- > >
+ template <class _Yp, class _Dp, class = __enable_if_t<_And<
+ __compatible_with<_Yp, _Tp>,
+ is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>
+ >::value> >
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp>& operator=(unique_ptr<_Yp, _Dp>&& __r)
{
@@ -764,7 +813,7 @@ public:
}
template<class _Yp, class = __enable_if_t<
- __compatible_with<_Yp, _Tp>::value
+ __raw_pointer_compatible_with<_Yp, _Tp>::value
> >
_LIBCPP_HIDE_FROM_ABI
void reset(_Yp* __p)
@@ -773,8 +822,7 @@ public:
}
template<class _Yp, class _Dp, class = __enable_if_t<
- __compatible_with<_Yp, _Tp>::value
- > >
+ __shared_ptr_deleter_ctor_reqs<_Dp, _Yp, _Tp>::value> >
_LIBCPP_HIDE_FROM_ABI
void reset(_Yp* __p, _Dp __d)
{
@@ -782,8 +830,7 @@ public:
}
template<class _Yp, class _Dp, class _Alloc, class = __enable_if_t<
- __compatible_with<_Yp, _Tp>::value
- > >
+ __shared_ptr_deleter_ctor_reqs<_Dp, _Yp, _Tp>::value> >
_LIBCPP_HIDE_FROM_ABI
void reset(_Yp* __p, _Dp __d, _Alloc __a)
{
@@ -858,7 +905,7 @@ public:
}
#endif
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
template <class _Dp>
_LIBCPP_HIDE_FROM_ABI
_Dp* __get_deleter() const _NOEXCEPT
@@ -867,7 +914,7 @@ public:
? const_cast<void *>(__cntrl_->__get_deleter(typeid(_Dp)))
: nullptr);
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
template<class _Yp, class _CntrlBlk>
_LIBCPP_HIDE_FROM_ABI
@@ -963,12 +1010,9 @@ template<class _Tp, class _Alloc, __enable_if_t<!is_array<_Tp>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp> allocate_shared_for_overwrite(const _Alloc& __a)
{
- using _ControlBlock = __shared_ptr_emplace<_Tp, _Alloc>;
- using _ControlBlockAllocator = typename __allocator_traits_rebind<_Alloc, _ControlBlock>::type;
- __allocation_guard<_ControlBlockAllocator> __guard(__a, 1);
- ::new ((void*)_VSTD::addressof(*__guard.__get())) _ControlBlock(__default_initialize_tag{}, __a);
- auto __control_block = __guard.__release_ptr();
- return shared_ptr<_Tp>::__create_with_control_block((*__control_block).__get_elem(), _VSTD::addressof(*__control_block));
+ using _ForOverwriteAllocator = __allocator_traits_rebind_t<_Alloc, __for_overwrite_tag>;
+ _ForOverwriteAllocator __alloc(__a);
+ return std::allocate_shared<_Tp>(__alloc);
}
template<class _Tp, __enable_if_t<!is_array<_Tp>::value, int> = 0>
@@ -1000,26 +1044,25 @@ struct __unbounded_array_control_block<_Tp[], _Alloc> : __shared_weak_count
explicit __unbounded_array_control_block(_Alloc const& __alloc, size_t __count, _Tp const& __arg)
: __alloc_(__alloc), __count_(__count)
{
- std::__uninitialized_allocator_fill_n(__alloc_, std::begin(__data_), __count_, __arg);
+ std::__uninitialized_allocator_fill_n_multidimensional(__alloc_, std::begin(__data_), __count_, __arg);
}
_LIBCPP_HIDE_FROM_ABI
explicit __unbounded_array_control_block(_Alloc const& __alloc, size_t __count)
: __alloc_(__alloc), __count_(__count)
{
- std::__uninitialized_allocator_value_construct_n(__alloc_, std::begin(__data_), __count_);
- }
-
#if _LIBCPP_STD_VER >= 20
- _LIBCPP_HIDE_FROM_ABI
- explicit __unbounded_array_control_block(_Alloc const& __alloc, size_t __count, __default_initialize_tag)
- : __alloc_(__alloc), __count_(__count)
- {
- // We are purposefully not using an allocator-aware default construction because the spec says so.
- // There's currently no way of expressing default initialization in an allocator-aware manner anyway.
- std::uninitialized_default_construct_n(std::begin(__data_), __count_);
- }
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ // We are purposefully not using an allocator-aware default construction because the spec says so.
+ // There's currently no way of expressing default initialization in an allocator-aware manner anyway.
+ std::uninitialized_default_construct_n(std::begin(__data_), __count_);
+ } else {
+ std::__uninitialized_allocator_value_construct_n_multidimensional(__alloc_, std::begin(__data_), __count_);
+ }
+#else
+ std::__uninitialized_allocator_value_construct_n_multidimensional(__alloc_, std::begin(__data_), __count_);
#endif
+ }
// Returns the number of bytes required to store a control block followed by the given number
// of elements of _Tp, with the whole storage being aligned to a multiple of _Tp's alignment.
@@ -1042,8 +1085,17 @@ struct __unbounded_array_control_block<_Tp[], _Alloc> : __shared_weak_count
private:
void __on_zero_shared() _NOEXCEPT override {
+#if _LIBCPP_STD_VER >= 20
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ std::__reverse_destroy(__data_, __data_ + __count_);
+ } else {
+ __allocator_traits_rebind_t<_Alloc, _Tp> __value_alloc(__alloc_);
+ std::__allocator_destroy_multidimensional(__value_alloc, __data_, __data_ + __count_);
+ }
+#else
__allocator_traits_rebind_t<_Alloc, _Tp> __value_alloc(__alloc_);
std::__allocator_destroy_multidimensional(__value_alloc, __data_, __data_ + __count_);
+#endif
}
void __on_zero_shared_weak() _NOEXCEPT override {
@@ -1096,30 +1148,40 @@ struct __bounded_array_control_block<_Tp[_Count], _Alloc>
_LIBCPP_HIDE_FROM_ABI
explicit __bounded_array_control_block(_Alloc const& __alloc, _Tp const& __arg) : __alloc_(__alloc) {
- std::__uninitialized_allocator_fill_n(__alloc_, std::addressof(__data_[0]), _Count, __arg);
+ std::__uninitialized_allocator_fill_n_multidimensional(__alloc_, std::addressof(__data_[0]), _Count, __arg);
}
_LIBCPP_HIDE_FROM_ABI
explicit __bounded_array_control_block(_Alloc const& __alloc) : __alloc_(__alloc) {
- std::__uninitialized_allocator_value_construct_n(__alloc_, std::addressof(__data_[0]), _Count);
- }
-
#if _LIBCPP_STD_VER >= 20
- _LIBCPP_HIDE_FROM_ABI
- explicit __bounded_array_control_block(_Alloc const& __alloc, __default_initialize_tag) : __alloc_(__alloc) {
- // We are purposefully not using an allocator-aware default construction because the spec says so.
- // There's currently no way of expressing default initialization in an allocator-aware manner anyway.
- std::uninitialized_default_construct_n(std::addressof(__data_[0]), _Count);
- }
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ // We are purposefully not using an allocator-aware default construction because the spec says so.
+ // There's currently no way of expressing default initialization in an allocator-aware manner anyway.
+ std::uninitialized_default_construct_n(std::addressof(__data_[0]), _Count);
+ } else {
+ std::__uninitialized_allocator_value_construct_n_multidimensional(__alloc_, std::addressof(__data_[0]), _Count);
+ }
+#else
+ std::__uninitialized_allocator_value_construct_n_multidimensional(__alloc_, std::addressof(__data_[0]), _Count);
#endif
+ }
_LIBCPP_HIDE_FROM_ABI_VIRTUAL
~__bounded_array_control_block() override { } // can't be `= default` because of the sometimes-non-trivial union member __data_
private:
void __on_zero_shared() _NOEXCEPT override {
+#if _LIBCPP_STD_VER >= 20
+ if constexpr (is_same_v<typename _Alloc::value_type, __for_overwrite_tag>) {
+ std::__reverse_destroy(__data_, __data_ + _Count);
+ } else {
+ __allocator_traits_rebind_t<_Alloc, _Tp> __value_alloc(__alloc_);
+ std::__allocator_destroy_multidimensional(__value_alloc, __data_, __data_ + _Count);
+ }
+#else
__allocator_traits_rebind_t<_Alloc, _Tp> __value_alloc(__alloc_);
std::__allocator_destroy_multidimensional(__value_alloc, __data_, __data_ + _Count);
+#endif
}
void __on_zero_shared_weak() _NOEXCEPT override {
@@ -1175,7 +1237,9 @@ template<class _Tp, class _Alloc, __enable_if_t<is_bounded_array<_Tp>::value, in
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp> allocate_shared_for_overwrite(const _Alloc& __a)
{
- return std::__allocate_shared_bounded_array<_Tp>(__a, __default_initialize_tag{});
+ using _ForOverwriteAllocator = __allocator_traits_rebind_t<_Alloc, __for_overwrite_tag>;
+ _ForOverwriteAllocator __alloc(__a);
+ return std::__allocate_shared_bounded_array<_Tp>(__alloc);
}
template<class _Tp, class = __enable_if_t<is_bounded_array<_Tp>::value>>
@@ -1196,7 +1260,7 @@ template<class _Tp, __enable_if_t<is_bounded_array<_Tp>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp> make_shared_for_overwrite()
{
- return std::__allocate_shared_bounded_array<_Tp>(allocator<_Tp>(), __default_initialize_tag{});
+ return std::__allocate_shared_bounded_array<_Tp>(allocator<__for_overwrite_tag>());
}
// unbounded array variants
@@ -1218,7 +1282,9 @@ template<class _Tp, class _Alloc, __enable_if_t<is_unbounded_array<_Tp>::value,
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp> allocate_shared_for_overwrite(const _Alloc& __a, size_t __n)
{
- return std::__allocate_shared_unbounded_array<_Tp>(__a, __n, __default_initialize_tag{});
+ using _ForOverwriteAllocator = __allocator_traits_rebind_t<_Alloc, __for_overwrite_tag>;
+ _ForOverwriteAllocator __alloc(__a);
+ return std::__allocate_shared_unbounded_array<_Tp>(__alloc, __n);
}
template<class _Tp, class = __enable_if_t<is_unbounded_array<_Tp>::value>>
@@ -1239,7 +1305,7 @@ template<class _Tp, __enable_if_t<is_unbounded_array<_Tp>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI
shared_ptr<_Tp> make_shared_for_overwrite(size_t __n)
{
- return std::__allocate_shared_unbounded_array<_Tp>(allocator<_Tp>(), __n, __default_initialize_tag{});
+ return std::__allocate_shared_unbounded_array<_Tp>(allocator<__for_overwrite_tag>(), __n);
}
#endif // _LIBCPP_STD_VER > 17
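[Editor's note] The hunks above replace the old __default_initialize_tag overloads: allocate_shared_for_overwrite and make_shared_for_overwrite now rebind the allocator to the internal __for_overwrite_tag type, and the control blocks test the allocator's value_type with if constexpr to choose default initialization over allocator-aware value construction. A minimal standalone sketch of that dispatch idea follows; the names for_overwrite_tag and construct_n are illustrative, not libc++'s, and exception-safety rollback is omitted for brevity.

#include <memory>
#include <type_traits>
#include <cstddef>

struct for_overwrite_tag {}; // illustrative stand-in for libc++'s __for_overwrite_tag

// Construct n objects at `first`: if the allocator was rebound to the tag type,
// default-initialize them (scalars stay indeterminate); otherwise value-construct
// through allocator_traits so allocator-aware construction still applies.
template <class Alloc, class T>
void construct_n(Alloc& alloc, T* first, std::size_t n) {
    if constexpr (std::is_same_v<typename std::allocator_traits<Alloc>::value_type,
                                 for_overwrite_tag>) {
        std::uninitialized_default_construct_n(first, n);
    } else {
        using TAlloc = typename std::allocator_traits<Alloc>::template rebind_alloc<T>;
        TAlloc value_alloc(alloc);
        for (std::size_t i = 0; i != n; ++i)
            std::allocator_traits<TAlloc>::construct(value_alloc, first + i);
    }
}

Because the tag travels inside the allocator type itself, no extra constructor parameter is needed to distinguish the *_for_overwrite path.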
@@ -1465,7 +1531,7 @@ reinterpret_pointer_cast(const shared_ptr<_Up>& __r) _NOEXCEPT
typename shared_ptr<_Tp>::element_type*>(__r.get()));
}
-#ifndef _LIBCPP_NO_RTTI
+#ifndef _LIBCPP_HAS_NO_RTTI
template<class _Dp, class _Tp>
inline _LIBCPP_INLINE_VISIBILITY
@@ -1475,7 +1541,7 @@ get_deleter(const shared_ptr<_Tp>& __p) _NOEXCEPT
return __p.template __get_deleter<_Dp>();
}
-#endif // _LIBCPP_NO_RTTI
+#endif // _LIBCPP_HAS_NO_RTTI
template<class _Tp>
class _LIBCPP_SHARED_PTR_TRIVIAL_ABI _LIBCPP_TEMPLATE_VIS weak_ptr
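[Editor's note] The RTTI guard corrected above (from _LIBCPP_NO_RTTI to _LIBCPP_HAS_NO_RTTI) is what conditionally exposes shared_ptr's deleter lookup, since std::get_deleter relies on typeid to match the stored deleter type. A small usage example of the public API, not part of the patch, assuming RTTI is enabled:

#include <memory>
#include <cassert>
#include <cstdio>

struct FileCloser {
    void operator()(std::FILE* f) const { if (f) std::fclose(f); }
};

int main() {
    std::shared_ptr<std::FILE> fp(std::tmpfile(), FileCloser{});
    // get_deleter needs typeid to find the stored deleter, which is why the
    // declarations above are guarded by the RTTI macro.
    if (FileCloser* d = std::get_deleter<FileCloser>(fp)) {
        (void)d; // deleter type matched
    }
    // A different deleter type yields a null result.
    assert(std::get_deleter<std::default_delete<std::FILE>>(fp) == nullptr);
}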
diff --git a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
index 63a45b2ac87b..90aecb7d6ad2 100644
--- a/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
+++ b/contrib/llvm-project/libcxx/include/__memory/uninitialized_algorithms.h
@@ -410,7 +410,7 @@ constexpr void __allocator_destroy_multidimensional(_Alloc& __alloc, _BidirIter
// This function assumes that the allocator is bound to the correct type.
template<class _Alloc, class _Tp>
_LIBCPP_HIDE_FROM_ABI
-constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc) {
+constexpr void __allocator_construct_at_multidimensional(_Alloc& __alloc, _Tp* __loc) {
static_assert(is_same_v<typename allocator_traits<_Alloc>::value_type, _Tp>,
"The allocator should already be rebound to the correct type");
@@ -421,12 +421,12 @@ constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc) {
_Tp& __array = *__loc;
// If an exception is thrown, destroy what we have constructed so far in reverse order.
- __exception_guard __guard([&]() {
+ auto __guard = std::__make_exception_guard([&]() {
std::__allocator_destroy_multidimensional(__elem_alloc, __array, __array + __i);
});
for (; __i != extent_v<_Tp>; ++__i) {
- std::__allocator_construct_at(__elem_alloc, std::addressof(__array[__i]));
+ std::__allocator_construct_at_multidimensional(__elem_alloc, std::addressof(__array[__i]));
}
__guard.__complete();
} else {
@@ -446,13 +446,13 @@ constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc) {
// This function assumes that the allocator is bound to the correct type.
template<class _Alloc, class _Tp, class _Arg>
_LIBCPP_HIDE_FROM_ABI
-constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc, _Arg const& __arg) {
+constexpr void __allocator_construct_at_multidimensional(_Alloc& __alloc, _Tp* __loc, _Arg const& __arg) {
static_assert(is_same_v<typename allocator_traits<_Alloc>::value_type, _Tp>,
"The allocator should already be rebound to the correct type");
if constexpr (is_array_v<_Tp>) {
static_assert(is_array_v<_Arg>,
- "Provided non-array initialization argument to __allocator_construct_at when "
+ "Provided non-array initialization argument to __allocator_construct_at_multidimensional when "
"trying to construct an array.");
using _Element = remove_extent_t<_Tp>;
@@ -461,11 +461,11 @@ constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc, _Arg const&
_Tp& __array = *__loc;
// If an exception is thrown, destroy what we have constructed so far in reverse order.
- __exception_guard __guard([&]() {
+ auto __guard = std::__make_exception_guard([&]() {
std::__allocator_destroy_multidimensional(__elem_alloc, __array, __array + __i);
});
for (; __i != extent_v<_Tp>; ++__i) {
- std::__allocator_construct_at(__elem_alloc, std::addressof(__array[__i]), __arg[__i]);
+ std::__allocator_construct_at_multidimensional(__elem_alloc, std::addressof(__array[__i]), __arg[__i]);
}
__guard.__complete();
} else {
@@ -481,33 +481,33 @@ constexpr void __allocator_construct_at(_Alloc& __alloc, _Tp* __loc, _Arg const&
// initialization using allocator_traits destruction. If the elements in the range are C-style
// arrays, they are initialized element-wise using allocator construction, and recursively so.
template<class _Alloc, class _BidirIter, class _Tp, class _Size = typename iterator_traits<_BidirIter>::difference_type>
-_LIBCPP_HIDE_FROM_ABI
-constexpr void __uninitialized_allocator_fill_n(_Alloc& __alloc, _BidirIter __it, _Size __n, _Tp const& __value) {
+_LIBCPP_HIDE_FROM_ABI constexpr void
+__uninitialized_allocator_fill_n_multidimensional(_Alloc& __alloc, _BidirIter __it, _Size __n, _Tp const& __value) {
using _ValueType = typename iterator_traits<_BidirIter>::value_type;
__allocator_traits_rebind_t<_Alloc, _ValueType> __value_alloc(__alloc);
_BidirIter __begin = __it;
// If an exception is thrown, destroy what we have constructed so far in reverse order.
- __exception_guard __guard([&]() { std::__allocator_destroy_multidimensional(__value_alloc, __begin, __it); });
+ auto __guard = std::__make_exception_guard([&]() { std::__allocator_destroy_multidimensional(__value_alloc, __begin, __it); });
for (; __n != 0; --__n, ++__it) {
- std::__allocator_construct_at(__value_alloc, std::addressof(*__it), __value);
+ std::__allocator_construct_at_multidimensional(__value_alloc, std::addressof(*__it), __value);
}
__guard.__complete();
}
-// Same as __uninitialized_allocator_fill_n, but doesn't pass any initialization argument
+// Same as __uninitialized_allocator_fill_n_multidimensional, but doesn't pass any initialization argument
// to the allocator's construct method, which results in value initialization.
-template<class _Alloc, class _BidirIter, class _Size = typename iterator_traits<_BidirIter>::difference_type>
-_LIBCPP_HIDE_FROM_ABI
-constexpr void __uninitialized_allocator_value_construct_n(_Alloc& __alloc, _BidirIter __it, _Size __n) {
+template <class _Alloc, class _BidirIter, class _Size = typename iterator_traits<_BidirIter>::difference_type>
+_LIBCPP_HIDE_FROM_ABI constexpr void
+__uninitialized_allocator_value_construct_n_multidimensional(_Alloc& __alloc, _BidirIter __it, _Size __n) {
using _ValueType = typename iterator_traits<_BidirIter>::value_type;
__allocator_traits_rebind_t<_Alloc, _ValueType> __value_alloc(__alloc);
_BidirIter __begin = __it;
// If an exception is thrown, destroy what we have constructed so far in reverse order.
- __exception_guard __guard([&]() { std::__allocator_destroy_multidimensional(__value_alloc, __begin, __it); });
+ auto __guard = std::__make_exception_guard([&]() { std::__allocator_destroy_multidimensional(__value_alloc, __begin, __it); });
for (; __n != 0; --__n, ++__it) {
- std::__allocator_construct_at(__value_alloc, std::addressof(*__it));
+ std::__allocator_construct_at_multidimensional(__value_alloc, std::addressof(*__it));
}
__guard.__complete();
}
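[Editor's note] The renamed *_multidimensional helpers keep the same structure: construct elements one by one, and if a constructor throws, a guard destroys the already-constructed prefix in reverse order before the exception escapes; only the guard is now obtained through std::__make_exception_guard. A self-contained sketch of that rollback pattern, using an illustrative scope_fail_guard rather than libc++'s internal guard:

#include <new>
#include <utility>
#include <cstddef>

// Minimal rollback guard in the spirit of libc++'s internal exception guard
// (illustrative only; std::__make_exception_guard is an internal API).
template <class Rollback>
class scope_fail_guard {
    Rollback rollback_;
    bool complete_ = false;
public:
    explicit scope_fail_guard(Rollback r) : rollback_(std::move(r)) {}
    void complete() noexcept { complete_ = true; }
    ~scope_fail_guard() { if (!complete_) rollback_(); }
};

// Value-construct n objects at `first`; if one constructor throws, destroy the
// already-constructed prefix in reverse order before the exception escapes.
template <class T>
void uninitialized_value_construct_with_rollback(T* first, std::size_t n) {
    std::size_t i = 0;
    scope_fail_guard guard([&] { while (i > 0) (first + --i)->~T(); });
    for (; i != n; ++i)
        ::new (static_cast<void*>(first + i)) T();
    guard.complete();
}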
diff --git a/contrib/llvm-project/libcxx/include/__memory_resource/polymorphic_allocator.h b/contrib/llvm-project/libcxx/include/__memory_resource/polymorphic_allocator.h
index 2489502bcdaf..f7b9a0b408c1 100644
--- a/contrib/llvm-project/libcxx/include/__memory_resource/polymorphic_allocator.h
+++ b/contrib/llvm-project/libcxx/include/__memory_resource/polymorphic_allocator.h
@@ -98,7 +98,7 @@ public:
template <class _Type, class... _CtorArgs>
[[nodiscard]] _Type* new_object(_CtorArgs&&... __ctor_args) {
_Type* __ptr = allocate_object<_Type>();
- __exception_guard __guard([&] { deallocate_object(__ptr); });
+ auto __guard = std::__make_exception_guard([&] { deallocate_object(__ptr); });
construct(__ptr, std::forward<_CtorArgs>(__ctor_args)...);
__guard.__complete();
return __ptr;
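[Editor's note] In the hunk above, pmr::polymorphic_allocator::new_object keeps its allocate-then-construct shape; only the guard that releases the allocation when construction throws is now created via std::__make_exception_guard. A brief usage example of the public API (C++20 <memory_resource>), not taken from the patch:

#include <memory_resource>
#include <string>

int main() {
    std::pmr::monotonic_buffer_resource pool;
    std::pmr::polymorphic_allocator<> alloc(&pool);

    // new_object allocates and constructs in one step; if the std::string
    // constructor threw, the guard in the patched code would deallocate.
    std::string* s = alloc.new_object<std::string>("hello");
    alloc.delete_object(s); // destroy + deallocate
}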
diff --git a/contrib/llvm-project/libcxx/include/__ranges/elements_view.h b/contrib/llvm-project/libcxx/include/__ranges/elements_view.h
index 3afd6ddbe8f2..997380ee9c65 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/elements_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/elements_view.h
@@ -49,12 +49,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
-template <class _View, size_t _Np, bool _Const>
-class __elements_view_iterator;
-
-template <class _View, size_t _Np, bool _Const>
-class __elements_view_sentinel;
-
template <class _Tp, size_t _Np>
concept __has_tuple_element = __tuple_like<_Tp> && _Np < tuple_size<_Tp>::value;
@@ -66,6 +60,13 @@ template <input_range _View, size_t _Np>
__has_tuple_element<remove_reference_t<range_reference_t<_View>>, _Np> &&
__returnable_element<range_reference_t<_View>, _Np>
class elements_view : public view_interface<elements_view<_View, _Np>> {
+private:
+ template <bool>
+ class __iterator;
+
+ template <bool>
+ class __sentinel;
+
public:
_LIBCPP_HIDE_FROM_ABI elements_view()
requires default_initializable<_View>
@@ -130,12 +131,6 @@ public:
}
private:
- template <bool _Const>
- using __iterator = __elements_view_iterator<_View, _Np, _Const>;
-
- template <bool _Const>
- using __sentinel = __elements_view_sentinel<_View, _Np, _Const>;
-
_LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
};
@@ -160,13 +155,18 @@ struct __elements_view_iterator_category_base<_Base, _Np> {
using iterator_category = decltype(__get_iterator_category());
};
-template <class _View, size_t _Np, bool _Const>
-class __elements_view_iterator : public __elements_view_iterator_category_base<__maybe_const<_Const, _View>, _Np> {
- template <class, size_t, bool >
- friend class __elements_view_iterator;
+template <input_range _View, size_t _Np>
+ requires view<_View> && __has_tuple_element<range_value_t<_View>, _Np> &&
+ __has_tuple_element<remove_reference_t<range_reference_t<_View>>, _Np> &&
+ __returnable_element<range_reference_t<_View>, _Np>
+template <bool _Const>
+class elements_view<_View, _Np>::__iterator
+ : public __elements_view_iterator_category_base<__maybe_const<_Const, _View>, _Np> {
+ template <bool>
+ friend class __iterator;
- template <class, size_t, bool >
- friend class __elements_view_sentinel;
+ template <bool>
+ friend class __sentinel;
using _Base = __maybe_const<_Const, _View>;
@@ -198,14 +198,13 @@ public:
using value_type = remove_cvref_t<tuple_element_t<_Np, range_value_t<_Base>>>;
using difference_type = range_difference_t<_Base>;
- _LIBCPP_HIDE_FROM_ABI __elements_view_iterator()
+ _LIBCPP_HIDE_FROM_ABI __iterator()
requires default_initializable<iterator_t<_Base>>
= default;
- _LIBCPP_HIDE_FROM_ABI constexpr explicit __elements_view_iterator(iterator_t<_Base> __current)
- : __current_(std::move(__current)) {}
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __iterator(iterator_t<_Base> __current) : __current_(std::move(__current)) {}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator(__elements_view_iterator<_View, _Np, !_Const> __i)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator(__iterator<!_Const> __i)
requires _Const && convertible_to<iterator_t<_View>, iterator_t<_Base>>
: __current_(std::move(__i.__current_)) {}
@@ -215,14 +214,14 @@ public:
_LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator*() const { return __get_element(__current_); }
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator& operator++() {
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator& operator++() {
++__current_;
return *this;
}
_LIBCPP_HIDE_FROM_ABI constexpr void operator++(int) { ++__current_; }
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator operator++(int)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator operator++(int)
requires forward_range<_Base>
{
auto temp = *this;
@@ -230,14 +229,14 @@ public:
return temp;
}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator& operator--()
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator& operator--()
requires bidirectional_range<_Base>
{
--__current_;
return *this;
}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator operator--(int)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator operator--(int)
requires bidirectional_range<_Base>
{
auto temp = *this;
@@ -245,14 +244,14 @@ public:
return temp;
}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator& operator+=(difference_type __n)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator& operator+=(difference_type __n)
requires random_access_range<_Base>
{
__current_ += __n;
return *this;
}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_iterator& operator-=(difference_type __n)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator& operator-=(difference_type __n)
requires random_access_range<_Base>
{
__current_ -= __n;
@@ -265,99 +264,91 @@ public:
return __get_element(__current_ + __n);
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const __iterator& __x, const __iterator& __y)
requires equality_comparable<iterator_t<_Base>>
{
return __x.__current_ == __y.__current_;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator<(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __x.__current_ < __y.__current_;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator>(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __y < __x;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator<=(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator<=(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return !(__y < __x);
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator>=(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator>=(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return !(__x < __y);
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr auto
- operator<=>(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base> && three_way_comparable<iterator_t<_Base>>
{
return __x.__current_ <=> __y.__current_;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr __elements_view_iterator
- operator+(const __elements_view_iterator& __x, difference_type __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr __iterator operator+(const __iterator& __x, difference_type __y)
requires random_access_range<_Base>
{
- return __elements_view_iterator{__x} += __y;
+ return __iterator{__x} += __y;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr __elements_view_iterator
- operator+(difference_type __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr __iterator operator+(difference_type __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __y + __x;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr __elements_view_iterator
- operator-(const __elements_view_iterator& __x, difference_type __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr __iterator operator-(const __iterator& __x, difference_type __y)
requires random_access_range<_Base>
{
- return __elements_view_iterator{__x} -= __y;
+ return __iterator{__x} -= __y;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr difference_type
- operator-(const __elements_view_iterator& __x, const __elements_view_iterator& __y)
+ _LIBCPP_HIDE_FROM_ABI friend constexpr difference_type operator-(const __iterator& __x, const __iterator& __y)
requires sized_sentinel_for<iterator_t<_Base>, iterator_t<_Base>>
{
return __x.__current_ - __y.__current_;
}
};
-template <class _View, size_t _Np, bool _Const>
-class __elements_view_sentinel {
+template <input_range _View, size_t _Np>
+ requires view<_View> && __has_tuple_element<range_value_t<_View>, _Np> &&
+ __has_tuple_element<remove_reference_t<range_reference_t<_View>>, _Np> &&
+ __returnable_element<range_reference_t<_View>, _Np>
+template <bool _Const>
+class elements_view<_View, _Np>::__sentinel {
private:
using _Base = __maybe_const<_Const, _View>;
_LIBCPP_NO_UNIQUE_ADDRESS sentinel_t<_Base> __end_ = sentinel_t<_Base>();
- template <class, size_t, bool >
- friend class __elements_view_sentinel;
+ template <bool>
+ friend class __sentinel;
template <bool _AnyConst>
- _LIBCPP_HIDE_FROM_ABI static constexpr decltype(auto)
- __get_current(const __elements_view_iterator<_View, _Np, _AnyConst>& __iter) {
+ _LIBCPP_HIDE_FROM_ABI static constexpr decltype(auto) __get_current(const __iterator<_AnyConst>& __iter) {
return (__iter.__current_);
}
public:
- _LIBCPP_HIDE_FROM_ABI __elements_view_sentinel() = default;
+ _LIBCPP_HIDE_FROM_ABI __sentinel() = default;
- _LIBCPP_HIDE_FROM_ABI constexpr explicit __elements_view_sentinel(sentinel_t<_Base> __end)
- : __end_(std::move(__end)) {}
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __sentinel(sentinel_t<_Base> __end) : __end_(std::move(__end)) {}
- _LIBCPP_HIDE_FROM_ABI constexpr __elements_view_sentinel(__elements_view_sentinel<_View, _Np, !_Const> __other)
+ _LIBCPP_HIDE_FROM_ABI constexpr __sentinel(__sentinel<!_Const> __other)
requires _Const && convertible_to<sentinel_t<_View>, sentinel_t<_Base>>
: __end_(std::move(__other.__end_)) {}
@@ -365,22 +356,21 @@ public:
template <bool _OtherConst>
requires sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const __elements_view_iterator<_View, _Np, _OtherConst>& __x, const __elements_view_sentinel& __y) {
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const __iterator<_OtherConst>& __x, const __sentinel& __y) {
return __get_current(__x) == __y.__end_;
}
template <bool _OtherConst>
requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI friend constexpr range_difference_t<__maybe_const<_OtherConst, _View>>
- operator-(const __elements_view_iterator<_View, _Np, _OtherConst>& __x, const __elements_view_sentinel& __y) {
+ operator-(const __iterator<_OtherConst>& __x, const __sentinel& __y) {
return __get_current(__x) - __y.__end_;
}
template <bool _OtherConst>
requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI friend constexpr range_difference_t<__maybe_const<_OtherConst, _View>>
- operator-(const __elements_view_sentinel& __x, const __elements_view_iterator<_View, _Np, _OtherConst>& __y) {
+ operator-(const __sentinel& __x, const __iterator<_OtherConst>& __y) {
return __x.__end_ - __get_current(__y);
}
};
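[Editor's note] The elements_view hunks above replace the free-standing __elements_view_iterator/__elements_view_sentinel class templates with nested member templates __iterator<_Const> and __sentinel<_Const>, declared inside the view and defined out of line, which simplifies the friend declarations and cross-references between iterator and sentinel. A compilable sketch of that out-of-line member-template pattern; the names elements_like_view and iterator are illustrative, not libc++'s:

#include <cstddef>
#include <utility>
#include <vector>

template <class Range, std::size_t N>
class elements_like_view {
    template <bool Const>
    class iterator; // forward declaration of the nested member template
public:
    using first_iterator = iterator<false>;
};

// The out-of-line definition needs one template header for the enclosing
// class template and a second one for the member template.
template <class Range, std::size_t N>
template <bool Const>
class elements_like_view<Range, N>::iterator {
public:
    int position = 0;
};

int main() {
    elements_like_view<std::vector<std::pair<int, int>>, 0>::first_iterator it;
    it.position = 1;
    return it.position - 1;
}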
diff --git a/contrib/llvm-project/libcxx/include/__ranges/filter_view.h b/contrib/llvm-project/libcxx/include/__ranges/filter_view.h
index f8633a3fee84..e14a9abeb9e7 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/filter_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/filter_view.h
@@ -46,15 +46,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD
#if _LIBCPP_STD_VER > 17
namespace ranges {
-
- template <input_range _View, indirect_unary_predicate<iterator_t<_View>> _Pred>
- requires view<_View> && is_object_v<_Pred>
- class __filter_view_iterator;
-
- template <input_range _View, indirect_unary_predicate<iterator_t<_View>> _Pred>
- requires view<_View> && is_object_v<_Pred>
- class __filter_view_sentinel;
-
template<input_range _View, indirect_unary_predicate<iterator_t<_View>> _Pred>
requires view<_View> && is_object_v<_Pred>
class filter_view : public view_interface<filter_view<_View, _Pred>> {
@@ -67,11 +58,8 @@ namespace ranges {
using _Cache = _If<_UseCache, __non_propagating_cache<iterator_t<_View>>, __empty_cache>;
_LIBCPP_NO_UNIQUE_ADDRESS _Cache __cached_begin_ = _Cache();
- using __iterator = __filter_view_iterator<_View, _Pred>;
- using __sentinel = __filter_view_sentinel<_View, _Pred>;
-
- friend __iterator;
- friend __sentinel;
+ class __iterator;
+ class __sentinel;
public:
_LIBCPP_HIDE_FROM_ABI
@@ -131,13 +119,11 @@ namespace ranges {
template<input_range _View, indirect_unary_predicate<iterator_t<_View>> _Pred>
requires view<_View> && is_object_v<_Pred>
- class __filter_view_iterator : public __filter_iterator_category<_View> {
-
- using __filter_view = filter_view<_View, _Pred>;
+ class filter_view<_View, _Pred>::__iterator : public __filter_iterator_category<_View> {
public:
_LIBCPP_NO_UNIQUE_ADDRESS iterator_t<_View> __current_ = iterator_t<_View>();
- _LIBCPP_NO_UNIQUE_ADDRESS __filter_view* __parent_ = nullptr;
+ _LIBCPP_NO_UNIQUE_ADDRESS filter_view* __parent_ = nullptr;
using iterator_concept =
_If<bidirectional_range<_View>, bidirectional_iterator_tag,
@@ -149,10 +135,10 @@ namespace ranges {
using difference_type = range_difference_t<_View>;
_LIBCPP_HIDE_FROM_ABI
- __filter_view_iterator() requires default_initializable<iterator_t<_View>> = default;
+ __iterator() requires default_initializable<iterator_t<_View>> = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr __filter_view_iterator(__filter_view& __parent, iterator_t<_View> __current)
+ constexpr __iterator(filter_view& __parent, iterator_t<_View> __current)
: __current_(std::move(__current)), __parent_(std::addressof(__parent))
{ }
@@ -171,7 +157,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __filter_view_iterator& operator++() {
+ constexpr __iterator& operator++() {
__current_ = ranges::find_if(std::move(++__current_), ranges::end(__parent_->__base_),
std::ref(*__parent_->__pred_));
return *this;
@@ -179,42 +165,42 @@ namespace ranges {
_LIBCPP_HIDE_FROM_ABI
constexpr void operator++(int) { ++*this; }
_LIBCPP_HIDE_FROM_ABI
- constexpr __filter_view_iterator operator++(int) requires forward_range<_View> {
+ constexpr __iterator operator++(int) requires forward_range<_View> {
auto __tmp = *this;
++*this;
return __tmp;
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __filter_view_iterator& operator--() requires bidirectional_range<_View> {
+ constexpr __iterator& operator--() requires bidirectional_range<_View> {
do {
--__current_;
} while (!std::invoke(*__parent_->__pred_, *__current_));
return *this;
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __filter_view_iterator operator--(int) requires bidirectional_range<_View> {
+ constexpr __iterator operator--(int) requires bidirectional_range<_View> {
auto tmp = *this;
--*this;
return tmp;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(__filter_view_iterator const& __x, __filter_view_iterator const& __y)
+ friend constexpr bool operator==(__iterator const& __x, __iterator const& __y)
requires equality_comparable<iterator_t<_View>>
{
return __x.__current_ == __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr range_rvalue_reference_t<_View> iter_move(__filter_view_iterator const& __it)
+ friend constexpr range_rvalue_reference_t<_View> iter_move(__iterator const& __it)
noexcept(noexcept(ranges::iter_move(__it.__current_)))
{
return ranges::iter_move(__it.__current_);
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr void iter_swap(__filter_view_iterator const& __x, __filter_view_iterator const& __y)
+ friend constexpr void iter_swap(__iterator const& __x, __iterator const& __y)
noexcept(noexcept(ranges::iter_swap(__x.__current_, __y.__current_)))
requires indirectly_swappable<iterator_t<_View>>
{
@@ -224,17 +210,15 @@ namespace ranges {
template<input_range _View, indirect_unary_predicate<iterator_t<_View>> _Pred>
requires view<_View> && is_object_v<_Pred>
- class __filter_view_sentinel {
- using __filter_view = filter_view<_View, _Pred>;
-
+ class filter_view<_View, _Pred>::__sentinel {
public:
sentinel_t<_View> __end_ = sentinel_t<_View>();
_LIBCPP_HIDE_FROM_ABI
- __filter_view_sentinel() = default;
+ __sentinel() = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr explicit __filter_view_sentinel(__filter_view& __parent)
+ constexpr explicit __sentinel(filter_view& __parent)
: __end_(ranges::end(__parent.__base_))
{ }
@@ -242,7 +226,7 @@ namespace ranges {
constexpr sentinel_t<_View> base() const { return __end_; }
_LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(__filter_view_iterator<_View, _Pred> const& __x, __filter_view_sentinel const& __y) {
+ operator==(__iterator const& __x, __sentinel const& __y) {
return __x.__current_ == __y.__end_;
}
};
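[Editor's note] As the filter_view hunks show, the iterator's operator++ delegates to ranges::find_if, so each increment skips directly to the next element that satisfies the predicate. A short usage example of the public std::views::filter adaptor (not part of the patch):

#include <ranges>
#include <vector>
#include <iostream>

int main() {
    std::vector<int> v{1, 2, 3, 4, 5, 6};
    // Each increment of the filter iterator performs the find_if shown in the
    // hunk above, advancing straight to the next even element.
    for (int x : v | std::views::filter([](int n) { return n % 2 == 0; }))
        std::cout << x << ' ';   // prints: 2 4 6
    std::cout << '\n';
}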
diff --git a/contrib/llvm-project/libcxx/include/__ranges/iota_view.h b/contrib/llvm-project/libcxx/include/__ranges/iota_view.h
index 3654096b7e17..8f9148a6849c 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/iota_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/iota_view.h
@@ -83,14 +83,6 @@ namespace ranges {
{ __j - __j } -> convertible_to<_IotaDiffT<_Iter>>;
};
- template <weakly_incrementable _Start>
- requires copyable<_Start>
- struct __iota_view_iterator;
-
- template <weakly_incrementable _Start, semiregular _BoundSentinel>
- requires __weakly_equality_comparable_with<_Start, _BoundSentinel> && copyable<_Start>
- struct __iota_view_sentinel;
-
template<class>
struct __iota_iterator_category {};
@@ -102,9 +94,211 @@ namespace ranges {
template <weakly_incrementable _Start, semiregular _BoundSentinel = unreachable_sentinel_t>
requires __weakly_equality_comparable_with<_Start, _BoundSentinel> && copyable<_Start>
class iota_view : public view_interface<iota_view<_Start, _BoundSentinel>> {
+ struct __iterator : public __iota_iterator_category<_Start> {
+ friend class iota_view;
+
+ using iterator_concept =
+ _If<__advanceable<_Start>, random_access_iterator_tag,
+ _If<__decrementable<_Start>, bidirectional_iterator_tag,
+ _If<incrementable<_Start>, forward_iterator_tag,
+ /*Else*/ input_iterator_tag>>>;
+
+ using value_type = _Start;
+ using difference_type = _IotaDiffT<_Start>;
+
+ _Start __value_ = _Start();
+
+ _LIBCPP_HIDE_FROM_ABI
+ __iterator() requires default_initializable<_Start> = default;
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr explicit __iterator(_Start __value) : __value_(std::move(__value)) {}
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr _Start operator*() const noexcept(is_nothrow_copy_constructible_v<_Start>) {
+ return __value_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator& operator++() {
+ ++__value_;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr void operator++(int) { ++*this; }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator operator++(int) requires incrementable<_Start> {
+ auto __tmp = *this;
+ ++*this;
+ return __tmp;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator& operator--() requires __decrementable<_Start> {
+ --__value_;
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator operator--(int) requires __decrementable<_Start> {
+ auto __tmp = *this;
+ --*this;
+ return __tmp;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator& operator+=(difference_type __n)
+ requires __advanceable<_Start>
+ {
+ if constexpr (__integer_like<_Start> && !__signed_integer_like<_Start>) {
+ if (__n >= difference_type(0)) {
+ __value_ += static_cast<_Start>(__n);
+ } else {
+ __value_ -= static_cast<_Start>(-__n);
+ }
+ } else {
+ __value_ += __n;
+ }
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr __iterator& operator-=(difference_type __n)
+ requires __advanceable<_Start>
+ {
+ if constexpr (__integer_like<_Start> && !__signed_integer_like<_Start>) {
+ if (__n >= difference_type(0)) {
+ __value_ -= static_cast<_Start>(__n);
+ } else {
+ __value_ += static_cast<_Start>(-__n);
+ }
+ } else {
+ __value_ -= __n;
+ }
+ return *this;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ constexpr _Start operator[](difference_type __n) const
+ requires __advanceable<_Start>
+ {
+ return _Start(__value_ + __n);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator==(const __iterator& __x, const __iterator& __y)
+ requires equality_comparable<_Start>
+ {
+ return __x.__value_ == __y.__value_;
+ }
- using __iterator = __iota_view_iterator<_Start>;
- using __sentinel = __iota_view_sentinel<_Start, _BoundSentinel>;
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator<(const __iterator& __x, const __iterator& __y)
+ requires totally_ordered<_Start>
+ {
+ return __x.__value_ < __y.__value_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator>(const __iterator& __x, const __iterator& __y)
+ requires totally_ordered<_Start>
+ {
+ return __y < __x;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator<=(const __iterator& __x, const __iterator& __y)
+ requires totally_ordered<_Start>
+ {
+ return !(__y < __x);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator>=(const __iterator& __x, const __iterator& __y)
+ requires totally_ordered<_Start>
+ {
+ return !(__x < __y);
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr auto operator<=>(const __iterator& __x, const __iterator& __y)
+ requires totally_ordered<_Start> && three_way_comparable<_Start>
+ {
+ return __x.__value_ <=> __y.__value_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr __iterator operator+(__iterator __i, difference_type __n)
+ requires __advanceable<_Start>
+ {
+ __i += __n;
+ return __i;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr __iterator operator+(difference_type __n, __iterator __i)
+ requires __advanceable<_Start>
+ {
+ return __i + __n;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr __iterator operator-(__iterator __i, difference_type __n)
+ requires __advanceable<_Start>
+ {
+ __i -= __n;
+ return __i;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr difference_type operator-(const __iterator& __x, const __iterator& __y)
+ requires __advanceable<_Start>
+ {
+ if constexpr (__integer_like<_Start>) {
+ if constexpr (__signed_integer_like<_Start>) {
+ return difference_type(difference_type(__x.__value_) - difference_type(__y.__value_));
+ }
+ if (__y.__value_ > __x.__value_) {
+ return difference_type(-difference_type(__y.__value_ - __x.__value_));
+ }
+ return difference_type(__x.__value_ - __y.__value_);
+ }
+ return __x.__value_ - __y.__value_;
+ }
+ };
+
+ struct __sentinel {
+ friend class iota_view;
+
+ private:
+ _BoundSentinel __bound_sentinel_ = _BoundSentinel();
+
+ public:
+ _LIBCPP_HIDE_FROM_ABI
+ __sentinel() = default;
+ constexpr explicit __sentinel(_BoundSentinel __bound_sentinel) : __bound_sentinel_(std::move(__bound_sentinel)) {}
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr bool operator==(const __iterator& __x, const __sentinel& __y) {
+ return __x.__value_ == __y.__bound_sentinel_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr iter_difference_t<_Start> operator-(const __iterator& __x, const __sentinel& __y)
+ requires sized_sentinel_for<_BoundSentinel, _Start>
+ {
+ return __x.__value_ - __y.__bound_sentinel_;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI
+ friend constexpr iter_difference_t<_Start> operator-(const __sentinel& __x, const __iterator& __y)
+ requires sized_sentinel_for<_BoundSentinel, _Start>
+ {
+ return -(__y - __x);
+ }
+ };
_Start __value_ = _Start();
_BoundSentinel __bound_sentinel_ = _BoundSentinel();
@@ -185,224 +379,6 @@ namespace ranges {
template <class _Start, class _BoundSentinel>
inline constexpr bool enable_borrowed_range<iota_view<_Start, _BoundSentinel>> = true;
- template <weakly_incrementable _Start>
- requires copyable<_Start>
- struct __iota_view_iterator : public __iota_iterator_category<_Start> {
-
- template <weakly_incrementable _StartT, semiregular _BoundSentinelT>
- requires __weakly_equality_comparable_with<_StartT, _BoundSentinelT> && copyable<_StartT>
- friend class iota_view;
-
- using iterator_concept =
- _If<__advanceable<_Start>, random_access_iterator_tag,
- _If<__decrementable<_Start>, bidirectional_iterator_tag,
- _If<incrementable<_Start>, forward_iterator_tag,
- /*Else*/ input_iterator_tag>>>;
-
- using value_type = _Start;
- using difference_type = _IotaDiffT<_Start>;
-
- _Start __value_ = _Start();
-
- _LIBCPP_HIDE_FROM_ABI
- __iota_view_iterator() requires default_initializable<_Start> = default;
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr explicit __iota_view_iterator(_Start __value) : __value_(std::move(__value)) {}
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr _Start operator*() const noexcept(is_nothrow_copy_constructible_v<_Start>) {
- return __value_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator& operator++() {
- ++__value_;
- return *this;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr void operator++(int) { ++*this; }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator operator++(int) requires incrementable<_Start> {
- auto __tmp = *this;
- ++*this;
- return __tmp;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator& operator--() requires __decrementable<_Start> {
- --__value_;
- return *this;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator operator--(int) requires __decrementable<_Start> {
- auto __tmp = *this;
- --*this;
- return __tmp;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator& operator+=(difference_type __n)
- requires __advanceable<_Start>
- {
- if constexpr (__integer_like<_Start> && !__signed_integer_like<_Start>) {
- if (__n >= difference_type(0)) {
- __value_ += static_cast<_Start>(__n);
- } else {
- __value_ -= static_cast<_Start>(-__n);
- }
- } else {
- __value_ += __n;
- }
- return *this;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr __iota_view_iterator& operator-=(difference_type __n)
- requires __advanceable<_Start>
- {
- if constexpr (__integer_like<_Start> && !__signed_integer_like<_Start>) {
- if (__n >= difference_type(0)) {
- __value_ -= static_cast<_Start>(__n);
- } else {
- __value_ += static_cast<_Start>(-__n);
- }
- } else {
- __value_ -= __n;
- }
- return *this;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- constexpr _Start operator[](difference_type __n) const
- requires __advanceable<_Start>
- {
- return _Start(__value_ + __n);
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires equality_comparable<_Start>
- {
- return __x.__value_ == __y.__value_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator<(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires totally_ordered<_Start>
- {
- return __x.__value_ < __y.__value_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator>(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires totally_ordered<_Start>
- {
- return __y < __x;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator<=(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires totally_ordered<_Start>
- {
- return !(__y < __x);
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator>=(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires totally_ordered<_Start>
- {
- return !(__x < __y);
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr auto operator<=>(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires totally_ordered<_Start> && three_way_comparable<_Start>
- {
- return __x.__value_ <=> __y.__value_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr __iota_view_iterator operator+(__iota_view_iterator __i, difference_type __n)
- requires __advanceable<_Start>
- {
- __i += __n;
- return __i;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr __iota_view_iterator operator+(difference_type __n, __iota_view_iterator __i)
- requires __advanceable<_Start>
- {
- return __i + __n;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr __iota_view_iterator operator-(__iota_view_iterator __i, difference_type __n)
- requires __advanceable<_Start>
- {
- __i -= __n;
- return __i;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr difference_type operator-(const __iota_view_iterator& __x, const __iota_view_iterator& __y)
- requires __advanceable<_Start>
- {
- if constexpr (__integer_like<_Start>) {
- if constexpr (__signed_integer_like<_Start>) {
- return difference_type(difference_type(__x.__value_) - difference_type(__y.__value_));
- }
- if (__y.__value_ > __x.__value_) {
- return difference_type(-difference_type(__y.__value_ - __x.__value_));
- }
- return difference_type(__x.__value_ - __y.__value_);
- }
- return __x.__value_ - __y.__value_;
- }
- };
-
- template <weakly_incrementable _Start, semiregular _BoundSentinel>
- requires __weakly_equality_comparable_with<_Start, _BoundSentinel> && copyable<_Start>
- struct __iota_view_sentinel {
-
- template <weakly_incrementable _StartT, semiregular _BoundSentinelT>
- requires __weakly_equality_comparable_with<_StartT, _BoundSentinelT> && copyable<_StartT>
- friend class iota_view;
-
- using __iterator = __iota_view_iterator<_Start>;
-
- private:
- _BoundSentinel __bound_sentinel_ = _BoundSentinel();
-
- public:
- _LIBCPP_HIDE_FROM_ABI
- __iota_view_sentinel() = default;
- constexpr explicit __iota_view_sentinel(_BoundSentinel __bound_sentinel) : __bound_sentinel_(std::move(__bound_sentinel)) {}
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __iterator& __x, const __iota_view_sentinel& __y) {
- return __x.__value_ == __y.__bound_sentinel_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr iter_difference_t<_Start> operator-(const __iterator& __x, const __iota_view_sentinel& __y)
- requires sized_sentinel_for<_BoundSentinel, _Start>
- {
- return __x.__value_ - __y.__bound_sentinel_;
- }
-
- _LIBCPP_HIDE_FROM_ABI
- friend constexpr iter_difference_t<_Start> operator-(const __iota_view_sentinel& __x, const __iterator& __y)
- requires sized_sentinel_for<_BoundSentinel, _Start>
- {
- return -(__y - __x);
- }
- };
-
namespace views {
namespace __iota {
struct __fn {
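[Editor's note] The __iterator::operator- shown above avoids overflow for unsigned integer-like _Start types by subtracting the smaller value from the larger one in the unsigned type and only then applying the sign in the wider signed difference type. A small worked example that mirrors that logic with plain integers, not libc++'s __integer_like machinery:

#include <cstdint>
#include <cassert>

int main() {
    using Start = std::uint32_t;
    using Diff  = std::int64_t;

    Start x = 3, y = 10;                 // y > x
    Diff d = -static_cast<Diff>(y - x);  // y - x == 7u: no unsigned wraparound
    assert(d == -7);

    Start a = 10, b = 3;                 // a >= b: subtract directly
    assert(static_cast<Diff>(a - b) == 7);
    return 0;
}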
diff --git a/contrib/llvm-project/libcxx/include/__ranges/istream_view.h b/contrib/llvm-project/libcxx/include/__ranges/istream_view.h
index 69a9df35a4d2..66cd91527616 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/istream_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/istream_view.h
@@ -36,18 +36,10 @@ namespace ranges {
template <class _Val, class _CharT, class _Traits>
concept __stream_extractable = requires(basic_istream<_CharT, _Traits>& __is, _Val& __t) { __is >> __t; };
-template <movable _Val, class _CharT, class _Traits>
- requires default_initializable<_Val> && __stream_extractable<_Val, _CharT, _Traits>
-class __basic_istream_view_iterator;
-
template <movable _Val, class _CharT, class _Traits = char_traits<_CharT>>
requires default_initializable<_Val> && __stream_extractable<_Val, _CharT, _Traits>
class basic_istream_view : public view_interface<basic_istream_view<_Val, _CharT, _Traits>> {
- using __iterator = __basic_istream_view_iterator<_Val, _CharT, _Traits>;
-
- template <movable _ValueType, class _CharType, class _TraitsType>
- requires default_initializable<_ValueType> && __stream_extractable<_ValueType, _CharType, _TraitsType>
- friend class __basic_istream_view_iterator;
+ class __iterator;
public:
_LIBCPP_HIDE_FROM_ABI constexpr explicit basic_istream_view(basic_istream<_CharT, _Traits>& __stream)
@@ -67,23 +59,23 @@ private:
template <movable _Val, class _CharT, class _Traits>
requires default_initializable<_Val> && __stream_extractable<_Val, _CharT, _Traits>
-class __basic_istream_view_iterator {
+class basic_istream_view<_Val, _CharT, _Traits>::__iterator {
public:
using iterator_concept = input_iterator_tag;
using difference_type = ptrdiff_t;
using value_type = _Val;
- _LIBCPP_HIDE_FROM_ABI constexpr explicit __basic_istream_view_iterator(
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __iterator(
basic_istream_view<_Val, _CharT, _Traits>& __parent) noexcept
: __parent_(std::addressof(__parent)) {}
- __basic_istream_view_iterator(const __basic_istream_view_iterator&) = delete;
- _LIBCPP_HIDE_FROM_ABI __basic_istream_view_iterator(__basic_istream_view_iterator&&) = default;
+ __iterator(const __iterator&) = delete;
+ _LIBCPP_HIDE_FROM_ABI __iterator(__iterator&&) = default;
- __basic_istream_view_iterator& operator=(const __basic_istream_view_iterator&) = delete;
- _LIBCPP_HIDE_FROM_ABI __basic_istream_view_iterator& operator=(__basic_istream_view_iterator&&) = default;
+ __iterator& operator=(const __iterator&) = delete;
+ _LIBCPP_HIDE_FROM_ABI __iterator& operator=(__iterator&&) = default;
- _LIBCPP_HIDE_FROM_ABI __basic_istream_view_iterator& operator++() {
+ _LIBCPP_HIDE_FROM_ABI __iterator& operator++() {
*__parent_->__stream_ >> __parent_->__value_;
return *this;
}
@@ -92,7 +84,7 @@ public:
_LIBCPP_HIDE_FROM_ABI _Val& operator*() const { return __parent_->__value_; }
- _LIBCPP_HIDE_FROM_ABI friend bool operator==(const __basic_istream_view_iterator& __x, default_sentinel_t) {
+ _LIBCPP_HIDE_FROM_ABI friend bool operator==(const __iterator& __x, default_sentinel_t) {
return !*__x.__get_parent_stream();
}
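[Editor's note] With the iterator now a nested member class, the observable behaviour of basic_istream_view is unchanged: each increment performs stream >> value, and the iterator compares equal to std::default_sentinel once the stream fails. A usage example of the public class (C++20 <ranges>), not part of the patch:

#include <ranges>
#include <sstream>
#include <iostream>

int main() {
    std::istringstream in("11 22 33");
    // Each ++ on the view's iterator does `in >> value` (as in the hunk above);
    // the loop stops once the stream fails, i.e. once the iterator compares
    // equal to std::default_sentinel.
    for (int n : std::ranges::basic_istream_view<int, char>(in))
        std::cout << n << ' ';   // prints: 11 22 33
    std::cout << '\n';
}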
diff --git a/contrib/llvm-project/libcxx/include/__ranges/join_view.h b/contrib/llvm-project/libcxx/include/__ranges/join_view.h
index 869540fc99c2..7c078422e900 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/join_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/join_view.h
@@ -40,7 +40,10 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-#if _LIBCPP_STD_VER > 17
+// Note: `join_view` is still marked experimental because there is an ABI-breaking change that affects `join_view` in
+// the pipeline (https://isocpp.org/files/papers/D2770R0.html).
+// TODO: make `join_view` non-experimental once D2770 is implemented.
+#if _LIBCPP_STD_VER > 17 && defined(_LIBCPP_ENABLE_EXPERIMENTAL)
namespace ranges {
template<class>
@@ -66,14 +69,6 @@ namespace ranges {
>;
};
- template <input_range _View, bool _Const>
- requires view<_View> && input_range<range_reference_t<_View>>
- struct __join_view_iterator;
-
- template <input_range _View, bool _Const>
- requires view<_View> && input_range<range_reference_t<_View>>
- struct __join_view_sentinel;
-
template<input_range _View>
requires view<_View> && input_range<range_reference_t<_View>>
class join_view
@@ -81,19 +76,9 @@ namespace ranges {
private:
using _InnerRange = range_reference_t<_View>;
- template<bool _Const>
- using __iterator = __join_view_iterator<_View, _Const>;
-
- template<bool _Const>
- using __sentinel = __join_view_sentinel<_View, _Const>;
+ template<bool> struct __iterator;
- template <input_range _View2, bool _Const2>
- requires view<_View2> && input_range<range_reference_t<_View2>>
- friend struct __join_view_iterator;
-
- template <input_range _View2, bool _Const2>
- requires view<_View2> && input_range<range_reference_t<_View2>>
- friend struct __join_view_sentinel;
+ template<bool> struct __sentinel;
template <class>
friend struct std::__segmented_iterator_traits;
@@ -164,12 +149,12 @@ namespace ranges {
}
};
- template<input_range _View, bool _Const>
+ template<input_range _View>
requires view<_View> && input_range<range_reference_t<_View>>
- struct __join_view_sentinel {
- template<input_range _View2, bool>
- requires view<_View2> && input_range<range_reference_t<_View2>>
- friend struct __join_view_sentinel;
+ template<bool _Const>
+ struct join_view<_View>::__sentinel {
+ template<bool>
+ friend struct __sentinel;
private:
using _Parent = __maybe_const<_Const, join_view<_View>>;
@@ -178,37 +163,42 @@ namespace ranges {
public:
_LIBCPP_HIDE_FROM_ABI
- __join_view_sentinel() = default;
+ __sentinel() = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr explicit __join_view_sentinel(_Parent& __parent)
+ constexpr explicit __sentinel(_Parent& __parent)
: __end_(ranges::end(__parent.__base_)) {}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_sentinel(__join_view_sentinel<_View, !_Const> __s)
+ constexpr __sentinel(__sentinel<!_Const> __s)
requires _Const && convertible_to<sentinel_t<_View>, sentinel_t<_Base>>
: __end_(std::move(__s.__end_)) {}
template<bool _OtherConst>
requires sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __join_view_iterator<_View, _OtherConst>& __x, const __join_view_sentinel& __y) {
+ friend constexpr bool operator==(const __iterator<_OtherConst>& __x, const __sentinel& __y) {
return __x.__outer_ == __y.__end_;
}
};
- template<input_range _View, bool _Const>
+ // https://reviews.llvm.org/D142811#inline-1383022
+ // To simplify the segmented iterator traits specialization,
+ // make the iterator `final`
+ template<input_range _View>
requires view<_View> && input_range<range_reference_t<_View>>
- struct __join_view_iterator
+ template<bool _Const>
+ struct join_view<_View>::__iterator final
: public __join_view_iterator_category<__maybe_const<_Const, _View>> {
- template<input_range _View2, bool>
- requires view<_View2> && input_range<range_reference_t<_View2>>
- friend struct __join_view_iterator;
+ template<bool>
+ friend struct __iterator;
template <class>
friend struct std::__segmented_iterator_traits;
+ static constexpr bool __is_join_view_iterator = true;
+
private:
using _Parent = __maybe_const<_Const, join_view<_View>>;
using _Base = __maybe_const<_Const, _View>;
@@ -243,7 +233,7 @@ namespace ranges {
__inner_.reset();
}
- _LIBCPP_HIDE_FROM_ABI constexpr __join_view_iterator(_Parent* __parent, _Outer __outer, _Inner __inner)
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator(_Parent* __parent, _Outer __outer, _Inner __inner)
: __outer_(std::move(__outer)), __inner_(std::move(__inner)), __parent_(__parent) {}
public:
@@ -264,17 +254,17 @@ namespace ranges {
range_difference_t<_Base>, range_difference_t<range_reference_t<_Base>>>;
_LIBCPP_HIDE_FROM_ABI
- __join_view_iterator() requires default_initializable<_Outer> = default;
+ __iterator() requires default_initializable<_Outer> = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator(_Parent& __parent, _Outer __outer)
+ constexpr __iterator(_Parent& __parent, _Outer __outer)
: __outer_(std::move(__outer))
, __parent_(std::addressof(__parent)) {
__satisfy();
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator(__join_view_iterator<_View, !_Const> __i)
+ constexpr __iterator(__iterator<!_Const> __i)
requires _Const &&
convertible_to<iterator_t<_View>, _Outer> &&
convertible_to<iterator_t<_InnerRange>, _Inner>
@@ -295,7 +285,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator& operator++() {
+ constexpr __iterator& operator++() {
auto&& __inner = [&]() -> auto&& {
if constexpr (__ref_is_glvalue)
return *__outer_;
@@ -315,7 +305,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator operator++(int)
+ constexpr __iterator operator++(int)
requires __ref_is_glvalue &&
forward_range<_Base> &&
forward_range<range_reference_t<_Base>>
@@ -326,7 +316,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator& operator--()
+ constexpr __iterator& operator--()
requires __ref_is_glvalue &&
bidirectional_range<_Base> &&
bidirectional_range<range_reference_t<_Base>> &&
@@ -345,7 +335,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __join_view_iterator operator--(int)
+ constexpr __iterator operator--(int)
requires __ref_is_glvalue &&
bidirectional_range<_Base> &&
bidirectional_range<range_reference_t<_Base>> &&
@@ -357,7 +347,7 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __join_view_iterator& __x, const __join_view_iterator& __y)
+ friend constexpr bool operator==(const __iterator& __x, const __iterator& __y)
requires __ref_is_glvalue &&
equality_comparable<iterator_t<_Base>> &&
equality_comparable<iterator_t<range_reference_t<_Base>>>
@@ -366,14 +356,14 @@ namespace ranges {
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr decltype(auto) iter_move(const __join_view_iterator& __i)
+ friend constexpr decltype(auto) iter_move(const __iterator& __i)
noexcept(noexcept(ranges::iter_move(*__i.__inner_)))
{
return ranges::iter_move(*__i.__inner_);
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr void iter_swap(const __join_view_iterator& __x, const __join_view_iterator& __y)
+ friend constexpr void iter_swap(const __iterator& __x, const __iterator& __y)
noexcept(noexcept(ranges::iter_swap(*__x.__inner_, *__y.__inner_)))
requires indirectly_swappable<_Inner>
{
@@ -401,12 +391,12 @@ inline namespace __cpo {
} // namespace views
} // namespace ranges
-template <class _View, bool _Const>
- requires(ranges::common_range<typename ranges::__join_view_iterator<_View, _Const>::_Parent> &&
- __is_cpp17_random_access_iterator<typename ranges::__join_view_iterator<_View, _Const>::_Outer>::value &&
- __is_cpp17_random_access_iterator<typename ranges::__join_view_iterator<_View, _Const>::_Inner>::value)
-struct __segmented_iterator_traits<ranges::__join_view_iterator<_View, _Const>> {
- using _JoinViewIterator = ranges::__join_view_iterator<_View, _Const>;
+template <class _JoinViewIterator>
+ requires(_JoinViewIterator::__is_join_view_iterator &&
+ ranges::common_range<typename _JoinViewIterator::_Parent> &&
+ __is_cpp17_random_access_iterator<typename _JoinViewIterator::_Outer>::value &&
+ __is_cpp17_random_access_iterator<typename _JoinViewIterator::_Inner>::value)
+struct __segmented_iterator_traits<_JoinViewIterator> {
using __segment_iterator =
_LIBCPP_NODEBUG __iterator_with_data<typename _JoinViewIterator::_Outer, typename _JoinViewIterator::_Parent*>;
@@ -445,7 +435,7 @@ struct __segmented_iterator_traits<ranges::__join_view_iterator<_View, _Const>>
}
};
-#endif // _LIBCPP_STD_VER > 17
+#endif // #if _LIBCPP_STD_VER > 17 && defined(_LIBCPP_ENABLE_EXPERIMENTAL)
_LIBCPP_END_NAMESPACE_STD
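[Editor's note] Two things happen in the join_view diff above: the header is temporarily gated behind _LIBCPP_ENABLE_EXPERIMENTAL because the ABI-breaking D2770 change is still pending, and the __segmented_iterator_traits specialization now keys on an __is_join_view_iterator member flag instead of naming the old free-standing iterator template. A usage example of the adaptor itself; with this libc++ snapshot it is assumed to require the experimental library to be enabled (for example via clang's -fexperimental-library):

#include <ranges>
#include <vector>
#include <iostream>

int main() {
    std::vector<std::vector<int>> vv{{1, 2}, {3}, {4, 5}};
    // views::join flattens the nested range element by element.
    for (int x : vv | std::views::join)
        std::cout << x << ' ';   // prints: 1 2 3 4 5
    std::cout << '\n';
}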
diff --git a/contrib/llvm-project/libcxx/include/__ranges/split_view.h b/contrib/llvm-project/libcxx/include/__ranges/split_view.h
index 9758ee935f29..6ebe5a43ed22 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/split_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/split_view.h
@@ -42,12 +42,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
-template <class _View, class _Pattern>
-struct __split_view_iterator;
-
-template <class _View, class _Pattern>
-struct __split_view_sentinel;
-
template <forward_range _View, forward_range _Pattern>
requires view<_View> && view<_Pattern> &&
indirectly_comparable<iterator_t<_View>, iterator_t<_Pattern>, ranges::equal_to>
@@ -59,13 +53,13 @@ private:
_Cache __cached_begin_ = _Cache();
template <class, class>
- friend struct __split_view_iterator;
+ friend struct __iterator;
template <class, class>
- friend struct __split_view_sentinel;
+ friend struct __sentinel;
- using __iterator = __split_view_iterator<_View, _Pattern>;
- using __sentinel = __split_view_sentinel<_View, _Pattern>;
+ struct __iterator;
+ struct __sentinel;
_LIBCPP_HIDE_FROM_ABI constexpr subrange<iterator_t<_View>> __find_next(iterator_t<_View> __it) {
auto [__begin, __end] = ranges::search(subrange(__it, ranges::end(__base_)), __pattern_);
@@ -120,16 +114,17 @@ split_view(_Range&&, _Pattern&&) -> split_view<views::all_t<_Range>, views::all_
template <forward_range _Range>
split_view(_Range&&, range_value_t<_Range>) -> split_view<views::all_t<_Range>, single_view<range_value_t<_Range>>>;
-template <class _View, class _Pattern>
-struct __split_view_iterator {
+template <forward_range _View, forward_range _Pattern>
+ requires view<_View> && view<_Pattern> &&
+ indirectly_comparable<iterator_t<_View>, iterator_t<_Pattern>, ranges::equal_to>
+struct split_view<_View, _Pattern>::__iterator {
private:
- split_view<_View, _Pattern>* __parent_ = nullptr;
+ split_view* __parent_ = nullptr;
_LIBCPP_NO_UNIQUE_ADDRESS iterator_t<_View> __cur_ = iterator_t<_View>();
_LIBCPP_NO_UNIQUE_ADDRESS subrange<iterator_t<_View>> __next_ = subrange<iterator_t<_View>>();
bool __trailing_empty_ = false;
- template <class, class>
- friend struct __split_view_sentinel;
+ friend struct __sentinel;
public:
using iterator_concept = forward_iterator_tag;
@@ -137,9 +132,9 @@ public:
using value_type = subrange<iterator_t<_View>>;
using difference_type = range_difference_t<_View>;
- _LIBCPP_HIDE_FROM_ABI __split_view_iterator() = default;
+ _LIBCPP_HIDE_FROM_ABI __iterator() = default;
- _LIBCPP_HIDE_FROM_ABI constexpr __split_view_iterator(
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator(
split_view<_View, _Pattern>& __parent, iterator_t<_View> __current, subrange<iterator_t<_View>> __next)
: __parent_(std::addressof(__parent)), __cur_(std::move(__current)), __next_(std::move(__next)) {}
@@ -147,7 +142,7 @@ public:
_LIBCPP_HIDE_FROM_ABI constexpr value_type operator*() const { return {__cur_, __next_.begin()}; }
- _LIBCPP_HIDE_FROM_ABI constexpr __split_view_iterator& operator++() {
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator& operator++() {
__cur_ = __next_.begin();
if (__cur_ != ranges::end(__parent_->__base_)) {
__cur_ = __next_.end();
@@ -163,36 +158,35 @@ public:
return *this;
}
- _LIBCPP_HIDE_FROM_ABI constexpr __split_view_iterator operator++(int) {
+ _LIBCPP_HIDE_FROM_ABI constexpr __iterator operator++(int) {
auto __tmp = *this;
++*this;
return __tmp;
}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const __split_view_iterator& __x, const __split_view_iterator& __y) {
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const __iterator& __x, const __iterator& __y) {
return __x.__cur_ == __y.__cur_ && __x.__trailing_empty_ == __y.__trailing_empty_;
}
};
-template <class _View, class _Pattern>
-struct __split_view_sentinel {
+template <forward_range _View, forward_range _Pattern>
+ requires view<_View> && view<_Pattern> &&
+ indirectly_comparable<iterator_t<_View>, iterator_t<_Pattern>, ranges::equal_to>
+struct split_view<_View, _Pattern>::__sentinel {
private:
_LIBCPP_NO_UNIQUE_ADDRESS sentinel_t<_View> __end_ = sentinel_t<_View>();
- _LIBCPP_HIDE_FROM_ABI static constexpr bool
- __equals(const __split_view_iterator<_View, _Pattern>& __x, const __split_view_sentinel& __y) {
+ _LIBCPP_HIDE_FROM_ABI static constexpr bool __equals(const __iterator& __x, const __sentinel& __y) {
return __x.__cur_ == __y.__end_ && !__x.__trailing_empty_;
}
public:
- _LIBCPP_HIDE_FROM_ABI __split_view_sentinel() = default;
+ _LIBCPP_HIDE_FROM_ABI __sentinel() = default;
- _LIBCPP_HIDE_FROM_ABI constexpr explicit __split_view_sentinel(split_view<_View, _Pattern>& __parent)
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __sentinel(split_view<_View, _Pattern>& __parent)
: __end_(ranges::end(__parent.__base_)) {}
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const __split_view_iterator<_View, _Pattern>& __x, const __split_view_sentinel& __y) {
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const __iterator& __x, const __sentinel& __y) {
return __equals(__x, __y);
}
};
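
The split_view hunks fold the free-standing __split_view_iterator and __split_view_sentinel class templates into members of split_view itself: declared inside the class, defined out of line with the enclosing template's constraints repeated. A minimal sketch of that shape, assuming hypothetical names (toy_view, T):

#include <concepts>

// A toy view with a constrained template parameter whose iterator is a
// nested class: declared inside, defined out of line, mirroring the shape
// of split_view<_View, _Pattern>::__iterator above.
template <std::integral T>
class toy_view {
  T value_{};

public:
  struct iterator;  // declared here, defined after the class

  constexpr explicit toy_view(T v) : value_(v) {}
  constexpr iterator begin() const { return iterator{value_}; }
};

// The out-of-line definition repeats the enclosing template header,
// including its constraint, before naming the nested class.
template <std::integral T>
struct toy_view<T>::iterator {
  T current{};
  constexpr T operator*() const { return current; }
};

static_assert(*toy_view<int>{42}.begin() == 42);

The take_while_view and transform_view hunks below make the same move, with the extra wrinkle that the nested member is itself a template over a bool const flag.
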
diff --git a/contrib/llvm-project/libcxx/include/__ranges/take_while_view.h b/contrib/llvm-project/libcxx/include/__ranges/take_while_view.h
index 59c0a4f82889..77d7390dceb9 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/take_while_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/take_while_view.h
@@ -53,17 +53,11 @@ template <class _View, class _Pred>
concept __take_while_const_is_range =
range<const _View> && indirect_unary_predicate<const _Pred, iterator_t<const _View>>;
-template <class, class, bool>
-class __take_while_view_sentinel;
-
template <view _View, class _Pred>
requires input_range<_View> && is_object_v<_Pred> && indirect_unary_predicate<const _Pred, iterator_t<_View>>
class take_while_view : public view_interface<take_while_view<_View, _Pred>> {
- template <class, class, bool>
- friend class __take_while_view_sentinel;
-
- template <bool _Const>
- using __sentinel = __take_while_view_sentinel<_View, _Pred, _Const>;
+ template <bool>
+ class __sentinel;
_LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
_LIBCPP_NO_UNIQUE_ADDRESS __copyable_box<_Pred> __pred_;
@@ -114,37 +108,37 @@ public:
template <class _Range, class _Pred>
take_while_view(_Range&&, _Pred) -> take_while_view<views::all_t<_Range>, _Pred>;
-template <class _View, class _Pred, bool _Const>
-class __take_while_view_sentinel {
+template <view _View, class _Pred>
+ requires input_range<_View> && is_object_v<_Pred> && indirect_unary_predicate<const _Pred, iterator_t<_View>>
+template <bool _Const>
+class take_while_view<_View, _Pred>::__sentinel {
using _Base = __maybe_const<_Const, _View>;
sentinel_t<_Base> __end_ = sentinel_t<_Base>();
const _Pred* __pred_ = nullptr;
- template <class, class, bool>
- friend class __take_while_view_sentinel;
+ friend class __sentinel<!_Const>;
public:
- _LIBCPP_HIDE_FROM_ABI __take_while_view_sentinel() = default;
+ _LIBCPP_HIDE_FROM_ABI __sentinel() = default;
- _LIBCPP_HIDE_FROM_ABI constexpr explicit __take_while_view_sentinel(sentinel_t<_Base> __end, const _Pred* __pred)
+ _LIBCPP_HIDE_FROM_ABI constexpr explicit __sentinel(sentinel_t<_Base> __end, const _Pred* __pred)
: __end_(std::move(__end)), __pred_(__pred) {}
- _LIBCPP_HIDE_FROM_ABI constexpr __take_while_view_sentinel(__take_while_view_sentinel<_View, _Pred, !_Const> __s)
+ _LIBCPP_HIDE_FROM_ABI constexpr __sentinel(__sentinel<!_Const> __s)
requires _Const && convertible_to<sentinel_t<_View>, sentinel_t<_Base>>
: __end_(std::move(__s.__end_)), __pred_(__s.__pred_) {}
_LIBCPP_HIDE_FROM_ABI constexpr sentinel_t<_Base> base() const { return __end_; }
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const iterator_t<_Base>& __x, const __take_while_view_sentinel& __y) {
+ _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const iterator_t<_Base>& __x, const __sentinel& __y) {
return __x == __y.__end_ || !std::invoke(*__y.__pred_, *__x);
}
template <bool _OtherConst = !_Const>
requires sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI friend constexpr bool
- operator==(const iterator_t<__maybe_const<_OtherConst, _View>>& __x, const __take_while_view_sentinel& __y) {
+ operator==(const iterator_t<__maybe_const<_OtherConst, _View>>& __x, const __sentinel& __y) {
return __x == __y.__end_ || !std::invoke(*__y.__pred_, *__x);
}
};
diff --git a/contrib/llvm-project/libcxx/include/__ranges/transform_view.h b/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
index 66d9e80e6e61..0b5ced3df428 100644
--- a/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
+++ b/contrib/llvm-project/libcxx/include/__ranges/transform_view.h
@@ -57,31 +57,11 @@ concept __transform_view_constraints =
regular_invocable<_Fn&, range_reference_t<_View>> &&
__can_reference<invoke_result_t<_Fn&, range_reference_t<_View>>>;
-template <input_range _View, copy_constructible _Function, bool _IsConst>
- requires __transform_view_constraints<_View, _Function>
-class __transform_view_iterator;
-
-template <input_range _View, copy_constructible _Function, bool _IsConst>
- requires __transform_view_constraints<_View, _Function>
-class __transform_view_sentinel;
-
template<input_range _View, copy_constructible _Fn>
requires __transform_view_constraints<_View, _Fn>
class transform_view : public view_interface<transform_view<_View, _Fn>> {
-
- template <bool _IsConst>
- using __iterator = __transform_view_iterator<_View, _Fn, _IsConst>;
-
- template <bool _IsConst>
- using __sentinel = __transform_view_sentinel<_View, _Fn, _IsConst>;
-
- template <input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_iterator;
-
- template <input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_sentinel;
+ template<bool> class __iterator;
+ template<bool> class __sentinel;
_LIBCPP_NO_UNIQUE_ADDRESS __copyable_box<_Fn> __func_;
_LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
@@ -176,23 +156,22 @@ struct __transform_view_iterator_category_base<_View, _Fn> {
>;
};
-template<input_range _View, copy_constructible _Fn, bool _Const>
+template<input_range _View, copy_constructible _Fn>
requires __transform_view_constraints<_View, _Fn>
-class __transform_view_iterator
+template<bool _Const>
+class transform_view<_View, _Fn>::__iterator
: public __transform_view_iterator_category_base<_View, _Fn> {
- using _Parent = __maybe_const<_Const, transform_view<_View, _Fn>>;
+ using _Parent = __maybe_const<_Const, transform_view>;
using _Base = __maybe_const<_Const, _View>;
_Parent *__parent_ = nullptr;
- template<input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_iterator;
+ template<bool>
+ friend class transform_view<_View, _Fn>::__iterator;
- template<input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_sentinel;
+ template<bool>
+ friend class transform_view<_View, _Fn>::__sentinel;
public:
iterator_t<_Base> __current_ = iterator_t<_Base>();
@@ -202,17 +181,17 @@ public:
using difference_type = range_difference_t<_Base>;
_LIBCPP_HIDE_FROM_ABI
- __transform_view_iterator() requires default_initializable<iterator_t<_Base>> = default;
+ __iterator() requires default_initializable<iterator_t<_Base>> = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator(_Parent& __parent, iterator_t<_Base> __current)
+ constexpr __iterator(_Parent& __parent, iterator_t<_Base> __current)
: __parent_(std::addressof(__parent)), __current_(std::move(__current)) {}
- // Note: `__i` should always be `__transform_view_iterator<false>`, but directly using
- // `__transform_view_iterator<false>` is ill-formed when `_Const` is false
+ // Note: `__i` should always be `__iterator<false>`, but directly using
+ // `__iterator<false>` is ill-formed when `_Const` is false
// (see http://wg21.link/class.copy.ctor#5).
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator(__transform_view_iterator<_View, _Fn, !_Const> __i)
+ constexpr __iterator(__iterator<!_Const> __i)
requires _Const && convertible_to<iterator_t<_View>, iterator_t<_Base>>
: __parent_(__i.__parent_), __current_(std::move(__i.__current_)) {}
@@ -234,7 +213,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator& operator++() {
+ constexpr __iterator& operator++() {
++__current_;
return *this;
}
@@ -243,7 +222,7 @@ public:
constexpr void operator++(int) { ++__current_; }
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator operator++(int)
+ constexpr __iterator operator++(int)
requires forward_range<_Base>
{
auto __tmp = *this;
@@ -252,7 +231,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator& operator--()
+ constexpr __iterator& operator--()
requires bidirectional_range<_Base>
{
--__current_;
@@ -260,7 +239,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator operator--(int)
+ constexpr __iterator operator--(int)
requires bidirectional_range<_Base>
{
auto __tmp = *this;
@@ -269,7 +248,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator& operator+=(difference_type __n)
+ constexpr __iterator& operator+=(difference_type __n)
requires random_access_range<_Base>
{
__current_ += __n;
@@ -277,7 +256,7 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_iterator& operator-=(difference_type __n)
+ constexpr __iterator& operator-=(difference_type __n)
requires random_access_range<_Base>
{
__current_ -= __n;
@@ -293,77 +272,77 @@ public:
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr bool operator==(const __iterator& __x, const __iterator& __y)
requires equality_comparable<iterator_t<_Base>>
{
return __x.__current_ == __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator<(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr bool operator<(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __x.__current_ < __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator>(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr bool operator>(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __x.__current_ > __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator<=(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr bool operator<=(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __x.__current_ <= __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator>=(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr bool operator>=(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base>
{
return __x.__current_ >= __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr auto operator<=>(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr auto operator<=>(const __iterator& __x, const __iterator& __y)
requires random_access_range<_Base> && three_way_comparable<iterator_t<_Base>>
{
return __x.__current_ <=> __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr __transform_view_iterator operator+(__transform_view_iterator __i, difference_type __n)
+ friend constexpr __iterator operator+(__iterator __i, difference_type __n)
requires random_access_range<_Base>
{
- return __transform_view_iterator{*__i.__parent_, __i.__current_ + __n};
+ return __iterator{*__i.__parent_, __i.__current_ + __n};
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr __transform_view_iterator operator+(difference_type __n, __transform_view_iterator __i)
+ friend constexpr __iterator operator+(difference_type __n, __iterator __i)
requires random_access_range<_Base>
{
- return __transform_view_iterator{*__i.__parent_, __i.__current_ + __n};
+ return __iterator{*__i.__parent_, __i.__current_ + __n};
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr __transform_view_iterator operator-(__transform_view_iterator __i, difference_type __n)
+ friend constexpr __iterator operator-(__iterator __i, difference_type __n)
requires random_access_range<_Base>
{
- return __transform_view_iterator{*__i.__parent_, __i.__current_ - __n};
+ return __iterator{*__i.__parent_, __i.__current_ - __n};
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr difference_type operator-(const __transform_view_iterator& __x, const __transform_view_iterator& __y)
+ friend constexpr difference_type operator-(const __iterator& __x, const __iterator& __y)
requires sized_sentinel_for<iterator_t<_Base>, iterator_t<_Base>>
{
return __x.__current_ - __y.__current_;
}
_LIBCPP_HIDE_FROM_ABI
- friend constexpr decltype(auto) iter_move(const __transform_view_iterator& __i)
+ friend constexpr decltype(auto) iter_move(const __iterator& __i)
noexcept(noexcept(*__i))
{
if constexpr (is_lvalue_reference_v<decltype(*__i)>)
@@ -373,37 +352,33 @@ public:
}
};
-template<input_range _View, copy_constructible _Fn, bool _Const>
+template<input_range _View, copy_constructible _Fn>
requires __transform_view_constraints<_View, _Fn>
-class __transform_view_sentinel {
- using _Parent = __maybe_const<_Const, transform_view<_View, _Fn>>;
+template<bool _Const>
+class transform_view<_View, _Fn>::__sentinel {
+ using _Parent = __maybe_const<_Const, transform_view>;
using _Base = __maybe_const<_Const, _View>;
- template <bool _IsConst>
- using __iterator = __transform_view_iterator<_View, _Fn, _IsConst>;
-
sentinel_t<_Base> __end_ = sentinel_t<_Base>();
- template<input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_iterator;
+ template<bool>
+ friend class transform_view<_View, _Fn>::__iterator;
- template<input_range _ViewType, copy_constructible _FunctionType, bool _IsConst>
- requires __transform_view_constraints<_ViewType, _FunctionType>
- friend class __transform_view_sentinel;
+ template<bool>
+ friend class transform_view<_View, _Fn>::__sentinel;
public:
_LIBCPP_HIDE_FROM_ABI
- __transform_view_sentinel() = default;
+ __sentinel() = default;
_LIBCPP_HIDE_FROM_ABI
- constexpr explicit __transform_view_sentinel(sentinel_t<_Base> __end) : __end_(__end) {}
+ constexpr explicit __sentinel(sentinel_t<_Base> __end) : __end_(__end) {}
- // Note: `__i` should always be `__transform_view_sentinel<false>`, but directly using
- // `__transform_view_sentinel<false>` is ill-formed when `_Const` is false
+ // Note: `__i` should always be `__sentinel<false>`, but directly using
+ // `__sentinel<false>` is ill-formed when `_Const` is false
// (see http://wg21.link/class.copy.ctor#5).
_LIBCPP_HIDE_FROM_ABI
- constexpr __transform_view_sentinel(__transform_view_sentinel<_View, _Fn, !_Const> __i)
+ constexpr __sentinel(__sentinel<!_Const> __i)
requires _Const && convertible_to<sentinel_t<_View>, sentinel_t<_Base>>
: __end_(std::move(__i.__end_)) {}
@@ -413,7 +388,7 @@ public:
template<bool _OtherConst>
requires sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI
- friend constexpr bool operator==(const __iterator<_OtherConst>& __x, const __transform_view_sentinel& __y) {
+ friend constexpr bool operator==(const __iterator<_OtherConst>& __x, const __sentinel& __y) {
return __x.__current_ == __y.__end_;
}
@@ -421,7 +396,7 @@ public:
requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI
friend constexpr range_difference_t<__maybe_const<_OtherConst, _View>>
- operator-(const __iterator<_OtherConst>& __x, const __transform_view_sentinel& __y) {
+ operator-(const __iterator<_OtherConst>& __x, const __sentinel& __y) {
return __x.__current_ - __y.__end_;
}
@@ -429,7 +404,7 @@ public:
requires sized_sentinel_for<sentinel_t<_Base>, iterator_t<__maybe_const<_OtherConst, _View>>>
_LIBCPP_HIDE_FROM_ABI
friend constexpr range_difference_t<__maybe_const<_OtherConst, _View>>
- operator-(const __transform_view_sentinel& __x, const __iterator<_OtherConst>& __y) {
+ operator-(const __sentinel& __x, const __iterator<_OtherConst>& __y) {
return __x.__end_ - __y.__current_;
}
};
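
The comment about __iterator<false> being ill-formed when _Const is false refers to [class.copy.ctor]p5: a constructor whose single by-value parameter is the class itself would be a malformed copy constructor, so the converting constructor is written in terms of !_Const and gated with requires _Const. A hedged sketch of the same pattern, with hypothetical names (wrapper, iterator, Const):

#include <type_traits>

template <class T>
class wrapper {
public:
  template <bool Const>
  class iterator {
    using pointer = std::conditional_t<Const, const T*, T*>;
    pointer ptr_ = nullptr;

    // Let the const iterator read the non-const iterator's pointer.
    friend class iterator<!Const>;

  public:
    iterator() = default;
    constexpr explicit iterator(pointer p) : ptr_(p) {}

    // Spelled with !Const rather than false: when Const is false the
    // parameter is iterator<true>, so this never collides with the copy
    // constructor, and the requires-clause disables it entirely.
    constexpr iterator(iterator<!Const> other)
      requires Const
        : ptr_(other.ptr_) {}

    constexpr decltype(auto) operator*() const { return *ptr_; }
  };
};

// A mutable iterator converts to a const one, never the reverse.
static_assert([] {
  int x = 7;
  wrapper<int>::iterator<false> it(&x);
  wrapper<int>::iterator<true> cit = it;  // converting constructor
  return *cit == 7;
}());
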
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/add_pointer.h b/contrib/llvm-project/libcxx/include/__type_traits/add_pointer.h
index 9d0c2010077b..1af5eca65c63 100644
--- a/contrib/llvm-project/libcxx/include/__type_traits/add_pointer.h
+++ b/contrib/llvm-project/libcxx/include/__type_traits/add_pointer.h
@@ -22,7 +22,7 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-#if __has_builtin(__add_pointer)
+#if !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__add_pointer)
template <class _Tp>
using __add_pointer_t = __add_pointer(_Tp);
@@ -39,7 +39,7 @@ template <class _Tp> struct __add_pointer_impl<_Tp, false>
template <class _Tp>
using __add_pointer_t = typename __add_pointer_impl<_Tp>::type;
-#endif // __has_builtin(__add_pointer)
+#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__add_pointer)
template <class _Tp>
struct add_pointer {
diff --git a/contrib/llvm-project/libcxx/include/__type_traits/remove_pointer.h b/contrib/llvm-project/libcxx/include/__type_traits/remove_pointer.h
index 33ddb7103f0d..2f810ba666bb 100644
--- a/contrib/llvm-project/libcxx/include/__type_traits/remove_pointer.h
+++ b/contrib/llvm-project/libcxx/include/__type_traits/remove_pointer.h
@@ -17,7 +17,7 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-#if __has_builtin(__remove_pointer)
+#if !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
template <class _Tp>
struct remove_pointer {
using type _LIBCPP_NODEBUG = __remove_pointer(_Tp);
@@ -34,7 +34,7 @@ template <class _Tp> struct _LIBCPP_TEMPLATE_VIS remove_pointer<_Tp* const volat
template <class _Tp>
using __remove_pointer_t = typename remove_pointer<_Tp>::type;
-#endif // __has_builtin(__remove_pointer)
+#endif // !defined(_LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS) && __has_builtin(__remove_pointer)
#if _LIBCPP_STD_VER > 11
template <class _Tp> using remove_pointer_t = __remove_pointer_t<_Tp>;
diff --git a/contrib/llvm-project/libcxx/include/__utility/exception_guard.h b/contrib/llvm-project/libcxx/include/__utility/exception_guard.h
index 737d1a69c971..46f9359a5c0e 100644
--- a/contrib/llvm-project/libcxx/include/__utility/exception_guard.h
+++ b/contrib/llvm-project/libcxx/include/__utility/exception_guard.h
@@ -60,25 +60,26 @@ _LIBCPP_BEGIN_NAMESPACE_STD
#ifndef _LIBCPP_NO_EXCEPTIONS
template <class _Rollback>
-struct __exception_guard {
- __exception_guard() = delete;
+struct __exception_guard_exceptions {
+ __exception_guard_exceptions() = delete;
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __exception_guard(_Rollback __rollback)
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __exception_guard_exceptions(_Rollback __rollback)
: __rollback_(std::move(__rollback)), __completed_(false) {}
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __exception_guard(__exception_guard&& __other)
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
+ __exception_guard_exceptions(__exception_guard_exceptions&& __other)
_NOEXCEPT_(is_nothrow_move_constructible<_Rollback>::value)
: __rollback_(std::move(__other.__rollback_)), __completed_(__other.__completed_) {
__other.__completed_ = true;
}
- __exception_guard(__exception_guard const&) = delete;
- __exception_guard& operator=(__exception_guard const&) = delete;
- __exception_guard& operator=(__exception_guard&&) = delete;
+ __exception_guard_exceptions(__exception_guard_exceptions const&) = delete;
+ __exception_guard_exceptions& operator=(__exception_guard_exceptions const&) = delete;
+ __exception_guard_exceptions& operator=(__exception_guard_exceptions&&) = delete;
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __complete() _NOEXCEPT { __completed_ = true; }
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 ~__exception_guard() {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 ~__exception_guard_exceptions() {
if (!__completed_)
__rollback_();
}
@@ -87,36 +88,46 @@ private:
_Rollback __rollback_;
bool __completed_;
};
+
+_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(__exception_guard_exceptions);
+
+template <class _Rollback>
+using __exception_guard = __exception_guard_exceptions<_Rollback>;
#else // _LIBCPP_NO_EXCEPTIONS
template <class _Rollback>
-struct __exception_guard {
- __exception_guard() = delete;
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG explicit __exception_guard(_Rollback) {}
+struct __exception_guard_noexceptions {
+ __exception_guard_noexceptions() = delete;
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
+ _LIBCPP_NODEBUG explicit __exception_guard_noexceptions(_Rollback) {}
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG __exception_guard(__exception_guard&& __other)
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG
+ __exception_guard_noexceptions(__exception_guard_noexceptions&& __other)
_NOEXCEPT_(is_nothrow_move_constructible<_Rollback>::value)
: __completed_(__other.__completed_) {
__other.__completed_ = true;
}
- __exception_guard(__exception_guard const&) = delete;
- __exception_guard& operator=(__exception_guard const&) = delete;
- __exception_guard& operator=(__exception_guard&&) = delete;
+ __exception_guard_noexceptions(__exception_guard_noexceptions const&) = delete;
+ __exception_guard_noexceptions& operator=(__exception_guard_noexceptions const&) = delete;
+ __exception_guard_noexceptions& operator=(__exception_guard_noexceptions&&) = delete;
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG void __complete() _NOEXCEPT {
__completed_ = true;
}
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG ~__exception_guard() {
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_NODEBUG ~__exception_guard_noexceptions() {
_LIBCPP_ASSERT(__completed_, "__exception_guard not completed with exceptions disabled");
}
private:
bool __completed_ = false;
};
-#endif // _LIBCPP_NO_EXCEPTIONS
-_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(__exception_guard);
+_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(__exception_guard_noexceptions);
+
+template <class _Rollback>
+using __exception_guard = __exception_guard_noexceptions<_Rollback>;
+#endif // _LIBCPP_NO_EXCEPTIONS
template <class _Rollback>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __exception_guard<_Rollback> __make_exception_guard(_Rollback __rollback) {
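
The exception_guard hunks only rename the type (one variant per exceptions mode, re-exported through the __exception_guard alias); the rollback-guard idiom itself is unchanged. A generic sketch of how such a guard is typically used, independent of libc++'s exact internals (scope_guard and append_twice are illustrative names):

#include <utility>
#include <vector>

// Run the rollback unless complete() was called before the guard is
// destroyed; the same shape as __exception_guard / __make_exception_guard.
template <class Rollback>
class scope_guard {
  Rollback rollback_;
  bool completed_ = false;

public:
  explicit scope_guard(Rollback r) : rollback_(std::move(r)) {}
  scope_guard(const scope_guard&) = delete;
  scope_guard& operator=(const scope_guard&) = delete;
  void complete() noexcept { completed_ = true; }
  ~scope_guard() {
    if (!completed_)
      rollback_();
  }
};

// Typical use: keep a container consistent if a later step throws.
void append_twice(std::vector<int>& v, int value) {
  v.push_back(value);
  scope_guard guard([&] { v.pop_back(); });  // undo the first insert on throw
  v.push_back(value);                        // may throw
  guard.complete();                          // both inserts succeeded
}
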
diff --git a/contrib/llvm-project/libcxx/include/any b/contrib/llvm-project/libcxx/include/any
index 92cbc9a6c92d..50a44d7f4d70 100644
--- a/contrib/llvm-project/libcxx/include/any
+++ b/contrib/llvm-project/libcxx/include/any
@@ -175,7 +175,7 @@ namespace __any_imp
inline _LIBCPP_INLINE_VISIBILITY
bool __compare_typeid(type_info const* __id, const void* __fallback_id)
{
-#if !defined(_LIBCPP_NO_RTTI)
+#if !defined(_LIBCPP_HAS_NO_RTTI)
if (__id && *__id == typeid(_Tp))
return true;
#endif
@@ -294,7 +294,7 @@ public:
_LIBCPP_INLINE_VISIBILITY
bool has_value() const _NOEXCEPT { return __h_ != nullptr; }
-#if !defined(_LIBCPP_NO_RTTI)
+#if !defined(_LIBCPP_HAS_NO_RTTI)
_LIBCPP_INLINE_VISIBILITY
const type_info & type() const _NOEXCEPT {
if (__h_) {
@@ -426,7 +426,7 @@ namespace __any_imp
_LIBCPP_INLINE_VISIBILITY
static void* __type_info()
{
-#if !defined(_LIBCPP_NO_RTTI)
+#if !defined(_LIBCPP_HAS_NO_RTTI)
return const_cast<void*>(static_cast<void const *>(&typeid(_Tp)));
#else
return nullptr;
@@ -514,7 +514,7 @@ namespace __any_imp
_LIBCPP_INLINE_VISIBILITY
static void* __type_info()
{
-#if !defined(_LIBCPP_NO_RTTI)
+#if !defined(_LIBCPP_HAS_NO_RTTI)
return const_cast<void*>(static_cast<void const *>(&typeid(_Tp)));
#else
return nullptr;
@@ -680,7 +680,7 @@ any_cast(any * __any) _NOEXCEPT
typedef add_pointer_t<_ValueType> _ReturnType;
if (__any && __any->__h_) {
void *__p = __any->__call(_Action::_Get, nullptr,
-#if !defined(_LIBCPP_NO_RTTI)
+#if !defined(_LIBCPP_HAS_NO_RTTI)
&typeid(_ValueType),
#else
nullptr,
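
The <any> hunks correct the configuration macro from _LIBCPP_NO_RTTI (which is never defined) to _LIBCPP_HAS_NO_RTTI, so the typeid-based comparison is actually compiled in when RTTI is enabled. A rough sketch of the dual comparison strategy, with MY_HAS_RTTI, matches, and type_tag as stand-in names rather than libc++'s real identifiers:

#include <typeinfo>

// Each T gets a distinct tag object, so &type_tag<T>::id is a unique
// per-type address usable even when RTTI is unavailable.
template <class T>
struct type_tag {
  static constexpr int id = 0;
};

template <class T>
bool matches(const std::type_info* info, const void* fallback) {
#if defined(MY_HAS_RTTI)
  if (info && *info == typeid(T))  // fast path when RTTI is compiled in
    return true;
#endif
  return fallback == static_cast<const void*>(&type_tag<T>::id);
}
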
diff --git a/contrib/llvm-project/libcxx/include/module.modulemap.in b/contrib/llvm-project/libcxx/include/module.modulemap.in
index 1f1d67dbb7fc..8898e88156ef 100644
--- a/contrib/llvm-project/libcxx/include/module.modulemap.in
+++ b/contrib/llvm-project/libcxx/include/module.modulemap.in
@@ -755,13 +755,19 @@ module std [system] {
module derived_from { private header "__concepts/derived_from.h" }
module destructible { private header "__concepts/destructible.h" }
module different_from { private header "__concepts/different_from.h" }
- module equality_comparable { private header "__concepts/equality_comparable.h" }
+ module equality_comparable {
+ private header "__concepts/equality_comparable.h"
+ export type_traits.common_reference
+ }
module invocable { private header "__concepts/invocable.h" }
module movable { private header "__concepts/movable.h" }
module predicate { private header "__concepts/predicate.h" }
module regular { private header "__concepts/regular.h" }
module relation { private header "__concepts/relation.h" }
- module same_as { private header "__concepts/same_as.h" }
+ module same_as {
+ private header "__concepts/same_as.h"
+ export type_traits.is_same
+ }
module semiregular { private header "__concepts/semiregular.h" }
module swappable { private header "__concepts/swappable.h" }
module totally_ordered { private header "__concepts/totally_ordered.h" }
@@ -979,7 +985,11 @@ module std [system] {
module back_insert_iterator { private header "__iterator/back_insert_iterator.h" }
module bounded_iter { private header "__iterator/bounded_iter.h" }
module common_iterator { private header "__iterator/common_iterator.h" }
- module concepts { private header "__iterator/concepts.h" }
+ module concepts {
+ private header "__iterator/concepts.h"
+ export concepts.equality_comparable
+ export type_traits.common_reference
+ }
module counted_iterator { private header "__iterator/counted_iterator.h" }
module data { private header "__iterator/data.h" }
module default_sentinel { private header "__iterator/default_sentinel.h" }
diff --git a/contrib/llvm-project/libcxx/include/source_location b/contrib/llvm-project/libcxx/include/source_location
index 4c4a09618ada..e9e852a6e461 100644
--- a/contrib/llvm-project/libcxx/include/source_location
+++ b/contrib/llvm-project/libcxx/include/source_location
@@ -35,7 +35,8 @@ namespace std {
_LIBCPP_BEGIN_NAMESPACE_STD
-#if _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_source_location)
+#if _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_source_location) && \
+ !(defined(_LIBCPP_APPLE_CLANG_VER) && _LIBCPP_APPLE_CLANG_VER <= 1403)
class source_location {
// The names source_location::__impl, _M_file_name, _M_function_name, _M_line, and _M_column
@@ -78,7 +79,8 @@ public:
}
};
-#endif // _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_source_location)
+#endif // _LIBCPP_STD_VER >= 20 && __has_builtin(__builtin_source_location) && !(defined(_LIBCPP_APPLE_CLANG_VER) &&
+ // _LIBCPP_APPLE_CLANG_VER <= 1403)
_LIBCPP_END_NAMESPACE_STD
diff --git a/contrib/llvm-project/libcxx/include/version b/contrib/llvm-project/libcxx/include/version
index 9705229a1c52..258dd641e144 100644
--- a/contrib/llvm-project/libcxx/include/version
+++ b/contrib/llvm-project/libcxx/include/version
@@ -139,6 +139,7 @@ __cpp_lib_polymorphic_allocator 201902L <memory_resource
__cpp_lib_quoted_string_io 201304L <iomanip>
__cpp_lib_ranges 202106L <algorithm> <functional> <iterator>
<memory> <ranges>
+__cpp_lib_ranges_as_rvalue 202207L <ranges>
__cpp_lib_ranges_chunk 202202L <ranges>
__cpp_lib_ranges_chunk_by 202202L <ranges>
__cpp_lib_ranges_iota 202202L <numeric>
@@ -365,7 +366,7 @@ __cpp_lib_void_t 201411L <type_traits>
# define __cpp_lib_shared_ptr_arrays 201707L
# define __cpp_lib_shift 201806L
// # define __cpp_lib_smart_ptr_for_overwrite 202002L
-# if __has_builtin(__builtin_source_location)
+# if __has_builtin(__builtin_source_location) && !(defined(_LIBCPP_APPLE_CLANG_VER) && _LIBCPP_APPLE_CLANG_VER <= 1403)
# define __cpp_lib_source_location 201907L
# endif
# define __cpp_lib_span 202002L
@@ -401,6 +402,7 @@ __cpp_lib_void_t 201411L <type_traits>
# undef __cpp_lib_optional
# define __cpp_lib_optional 202110L
// # define __cpp_lib_out_ptr 202106L
+# define __cpp_lib_ranges_as_rvalue 202207L
// # define __cpp_lib_ranges_chunk 202202L
// # define __cpp_lib_ranges_chunk_by 202202L
// # define __cpp_lib_ranges_iota 202202L
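
The <version> hunks advertise the newly implemented ranges::as_rvalue_view through __cpp_lib_ranges_as_rvalue and only define __cpp_lib_source_location when the underlying builtin is usable. Client code would normally probe such macros before relying on the feature; a small sketch (HAVE_AS_RVALUE and drain are illustrative names):

#include <version>

#if defined(__cpp_lib_ranges_as_rvalue) && __cpp_lib_ranges_as_rvalue >= 202207L
#  include <ranges>
#  define HAVE_AS_RVALUE 1
#else
#  define HAVE_AS_RVALUE 0
#endif

#include <string>
#include <utility>
#include <vector>

// Move the strings out of src when the view is available; otherwise copy.
std::vector<std::string> drain(std::vector<std::string>& src) {
  std::vector<std::string> out;
#if HAVE_AS_RVALUE
  for (std::string&& s : src | std::views::as_rvalue)
    out.push_back(std::move(s));
#else
  out.assign(src.begin(), src.end());
#endif
  return out;
}
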
diff --git a/contrib/llvm-project/libunwind/include/libunwind.modulemap b/contrib/llvm-project/libunwind/include/libunwind.modulemap
index 162fe1d279a3..775841ecb5f1 100644
--- a/contrib/llvm-project/libunwind/include/libunwind.modulemap
+++ b/contrib/llvm-project/libunwind/include/libunwind.modulemap
@@ -6,5 +6,8 @@ module libunwind [system] {
module unwind [system] {
header "__libunwind_config.h"
header "unwind.h"
+ private textual header "unwind_arm_ehabi.h"
+ private textual header "unwind_itanium.h"
+
export *
}
diff --git a/contrib/llvm-project/libunwind/include/unwind.h b/contrib/llvm-project/libunwind/include/unwind.h
index 26cdef22207e..b1775d3a3dec 100644
--- a/contrib/llvm-project/libunwind/include/unwind.h
+++ b/contrib/llvm-project/libunwind/include/unwind.h
@@ -56,9 +56,9 @@ typedef enum {
typedef struct _Unwind_Context _Unwind_Context; // opaque
#if defined(_LIBUNWIND_ARM_EHABI)
-#include "unwind_arm_ehabi.h"
+#include <unwind_arm_ehabi.h>
#else
-#include "unwind_itanium.h"
+#include <unwind_itanium.h>
#endif
typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn)
diff --git a/contrib/llvm-project/libunwind/src/DwarfInstructions.hpp b/contrib/llvm-project/libunwind/src/DwarfInstructions.hpp
index 32bd45f6b0c3..4e57c5bdefa0 100644
--- a/contrib/llvm-project/libunwind/src/DwarfInstructions.hpp
+++ b/contrib/llvm-project/libunwind/src/DwarfInstructions.hpp
@@ -220,7 +220,8 @@ int DwarfInstructions<A, R>::stepWithDwarf(A &addressSpace, pint_t pc,
p &= ~0xfULL;
// CFA is the bottom of the current stack frame.
for (; p < cfa; p += 16) {
- __asm__ __volatile__(".arch_extension memtag\n"
+ __asm__ __volatile__(".arch armv8.5-a\n"
+ ".arch_extension memtag\n"
"stg %[Ptr], [%[Ptr]]\n"
:
: [Ptr] "r"(p)
diff --git a/contrib/llvm-project/libunwind/src/UnwindRegistersRestore.S b/contrib/llvm-project/libunwind/src/UnwindRegistersRestore.S
index 2a472be943f3..543b19f7e72a 100644
--- a/contrib/llvm-project/libunwind/src/UnwindRegistersRestore.S
+++ b/contrib/llvm-project/libunwind/src/UnwindRegistersRestore.S
@@ -194,9 +194,20 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
addi 4, 3, PPC64_OFFS_FP
// load VS register
+#ifdef __LITTLE_ENDIAN__
+// For little-endian targets, we need a swap since lxvd2x will load the register
+// in the incorrect doubleword order.
+// FIXME: when supporting targets older than Power9 on LE is no longer required,
+// this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n) \
lxvd2x n, 0, 4 ;\
+ xxswapd n, n ;\
addi 4, 4, 16
+#else
+#define PPC64_LVS(n) \
+ lxvd2x n, 0, 4 ;\
+ addi 4, 4, 16
+#endif
// restore the first 32 VS regs (and also all floating point regs)
PPC64_LVS(0)
@@ -232,9 +243,16 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
PPC64_LVS(30)
PPC64_LVS(31)
+#ifdef __LITTLE_ENDIAN__
+#define PPC64_CLVS_RESTORE(n) \
+ addi 4, 3, PPC64_OFFS_FP + n * 16 ;\
+ lxvd2x n, 0, 4 ;\
+ xxswapd n, n
+#else
#define PPC64_CLVS_RESTORE(n) \
addi 4, 3, PPC64_OFFS_FP + n * 16 ;\
lxvd2x n, 0, 4
+#endif
#if !defined(_AIX)
// use VRSAVE to conditionally restore the remaining VS regs, that are
diff --git a/contrib/llvm-project/libunwind/src/UnwindRegistersSave.S b/contrib/llvm-project/libunwind/src/UnwindRegistersSave.S
index 6c26b79877f6..79f5696a9888 100644
--- a/contrib/llvm-project/libunwind/src/UnwindRegistersSave.S
+++ b/contrib/llvm-project/libunwind/src/UnwindRegistersSave.S
@@ -351,9 +351,20 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
addi 4, 3, PPC64_OFFS_FP
// store VS register
+#ifdef __LITTLE_ENDIAN__
+// For little-endian targets, we need a swap since stxvd2x will store the
+// register in the incorrect doubleword order.
+// FIXME: when supporting targets older than Power9 on LE is no longer required
+// this can be changed to simply `stxv n, 16 * n(4)`.
#define PPC64_STVS(n) \
+ xxswapd n, n ;\
stxvd2x n, 0, 4 ;\
addi 4, 4, 16
+#else
+#define PPC64_STVS(n) \
+ stxvd2x n, 0, 4 ;\
+ addi 4, 4, 16
+#endif
PPC64_STVS(0)
PPC64_STVS(1)
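
Both libunwind hunks wrap lxvd2x/stxvd2x with xxswapd because, on little-endian PPC64, those instructions present the two 64-bit doublewords of a VS register in the opposite order from the in-memory layout. As a rough host-side illustration of that fix-up only, not of the VSX semantics themselves:

#include <cstdint>
#include <utility>

// A 128-bit register image as two 64-bit doublewords; the added xxswapd
// undoes the doubleword reversal that lxvd2x/stxvd2x introduce on LE.
struct vs_reg {
  std::uint64_t dw[2];
};

constexpr vs_reg swap_doublewords(vs_reg r) {
  std::swap(r.dw[0], r.dw[1]);
  return r;
}

static_assert(swap_doublewords({{1, 2}}).dw[0] == 2);
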
diff --git a/contrib/llvm-project/lld/COFF/MinGW.cpp b/contrib/llvm-project/lld/COFF/MinGW.cpp
index 01372cbdf29a..71aa596ea6ab 100644
--- a/contrib/llvm-project/lld/COFF/MinGW.cpp
+++ b/contrib/llvm-project/lld/COFF/MinGW.cpp
@@ -49,6 +49,9 @@ AutoExporter::AutoExporter(
"libclang_rt.profile-x86_64",
"libc++",
"libc++abi",
+ "libFortran_main",
+ "libFortranRuntime",
+ "libFortranDecimal",
"libunwind",
"libmsvcrt",
"libucrtbase",
diff --git a/contrib/llvm-project/lld/ELF/Arch/RISCV.cpp b/contrib/llvm-project/lld/ELF/Arch/RISCV.cpp
index 6c478ad4109b..87887b314a5a 100644
--- a/contrib/llvm-project/lld/ELF/Arch/RISCV.cpp
+++ b/contrib/llvm-project/lld/ELF/Arch/RISCV.cpp
@@ -846,9 +846,7 @@ public:
static void mergeArch(RISCVISAInfo::OrderedExtensionMap &mergedExts,
unsigned &mergedXlen, const InputSectionBase *sec,
StringRef s) {
- auto maybeInfo =
- RISCVISAInfo::parseArchString(s, /*EnableExperimentalExtension=*/true,
- /*ExperimentalExtensionVersionCheck=*/true);
+ auto maybeInfo = RISCVISAInfo::parseNormalizedArchString(s);
if (!maybeInfo) {
errorOrWarn(toString(sec) + ": " + s + ": " +
llvm::toString(maybeInfo.takeError()));
@@ -863,8 +861,6 @@ static void mergeArch(RISCVISAInfo::OrderedExtensionMap &mergedExts,
} else {
for (const auto &ext : info.getExtensions()) {
if (auto it = mergedExts.find(ext.first); it != mergedExts.end()) {
- // TODO This is untested because RISCVISAInfo::parseArchString does not
- // accept unsupported versions yet.
if (std::tie(it->second.MajorVersion, it->second.MinorVersion) >=
std::tie(ext.second.MajorVersion, ext.second.MinorVersion))
continue;
diff --git a/contrib/llvm-project/lld/ELF/ICF.cpp b/contrib/llvm-project/lld/ELF/ICF.cpp
index d7a85bcf844f..c2b0ce9b12b9 100644
--- a/contrib/llvm-project/lld/ELF/ICF.cpp
+++ b/contrib/llvm-project/lld/ELF/ICF.cpp
@@ -323,8 +323,9 @@ bool ICF<ELFT>::equalsConstant(const InputSection *a, const InputSection *b) {
const RelsOrRelas<ELFT> ra = a->template relsOrRelas<ELFT>();
const RelsOrRelas<ELFT> rb = b->template relsOrRelas<ELFT>();
- return ra.areRelocsRel() ? constantEq(a, ra.rels, b, rb.rels)
- : constantEq(a, ra.relas, b, rb.relas);
+ return ra.areRelocsRel() || rb.areRelocsRel()
+ ? constantEq(a, ra.rels, b, rb.rels)
+ : constantEq(a, ra.relas, b, rb.relas);
}
// Compare two lists of relocations. Returns true if all pairs of
@@ -371,8 +372,9 @@ template <class ELFT>
bool ICF<ELFT>::equalsVariable(const InputSection *a, const InputSection *b) {
const RelsOrRelas<ELFT> ra = a->template relsOrRelas<ELFT>();
const RelsOrRelas<ELFT> rb = b->template relsOrRelas<ELFT>();
- return ra.areRelocsRel() ? variableEq(a, ra.rels, b, rb.rels)
- : variableEq(a, ra.relas, b, rb.relas);
+ return ra.areRelocsRel() || rb.areRelocsRel()
+ ? variableEq(a, ra.rels, b, rb.rels)
+ : variableEq(a, ra.relas, b, rb.relas);
}
template <class ELFT> size_t ICF<ELFT>::findBoundary(size_t begin, size_t end) {
diff --git a/contrib/llvm-project/lld/ELF/Relocations.cpp b/contrib/llvm-project/lld/ELF/Relocations.cpp
index 1368d9053125..aeba918292a7 100644
--- a/contrib/llvm-project/lld/ELF/Relocations.cpp
+++ b/contrib/llvm-project/lld/ELF/Relocations.cpp
@@ -1079,7 +1079,15 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
return;
}
- bool canWrite = (sec->flags & SHF_WRITE) || !config->zText;
+ // Use a simple -z notext rule that treats all sections except .eh_frame as
+ // writable. GNU ld does not produce dynamic relocations in .eh_frame (and our
+ // SectionBase::getOffset would incorrectly adjust the offset).
+ //
+ // For MIPS, we don't implement GNU ld's DW_EH_PE_absptr to DW_EH_PE_pcrel
+ // conversion. We still emit a dynamic relocation.
+ bool canWrite = (sec->flags & SHF_WRITE) ||
+ !(config->zText ||
+ (isa<EhInputSection>(sec) && config->emachine != EM_MIPS));
if (canWrite) {
RelType rel = target->getDynRel(type);
if (expr == R_GOT || (rel == target->symbolicRel && !sym.isPreemptible)) {
diff --git a/contrib/llvm-project/lld/ELF/SymbolTable.cpp b/contrib/llvm-project/lld/ELF/SymbolTable.cpp
index f09d0d7f9095..f616245233b1 100644
--- a/contrib/llvm-project/lld/ELF/SymbolTable.cpp
+++ b/contrib/llvm-project/lld/ELF/SymbolTable.cpp
@@ -277,8 +277,8 @@ void SymbolTable::scanVersionScript() {
pat.isExternCpp, /*hasWildCard=*/false},
id, ver, /*includeNonDefault=*/true);
if (!found && !config->undefinedVersion)
- errorOrWarn("version script assignment of '" + ver + "' to symbol '" +
- pat.name + "' failed: symbol not defined");
+ warn("version script assignment of '" + ver + "' to symbol '" +
+ pat.name + "' failed: symbol not defined");
};
for (SymbolVersion &pat : v.nonLocalPatterns)
if (!pat.hasWildcard)
diff --git a/contrib/llvm-project/lld/docs/ReleaseNotes.rst b/contrib/llvm-project/lld/docs/ReleaseNotes.rst
index 00aaeff96d78..a450923cded9 100644
--- a/contrib/llvm-project/lld/docs/ReleaseNotes.rst
+++ b/contrib/llvm-project/lld/docs/ReleaseNotes.rst
@@ -26,6 +26,10 @@ Non-comprehensive list of changes in this release
ELF Improvements
----------------
+* Link speed improved greatly compared with lld 15.0. Notably input section
+ initialization and relocation scanning are now parallel.
+ (`D130810 <https://reviews.llvm.org/D130810>`_)
+ (`D133003 <https://reviews.llvm.org/D133003>`_)
* ``ELFCOMPRESS_ZSTD`` compressed input sections are now supported.
(`D129406 <https://reviews.llvm.org/D129406>`_)
* ``--compress-debug-sections=zstd`` is now available to compress debug
@@ -36,12 +40,25 @@ ELF Improvements
* ``DT_RISCV_VARIANT_CC`` is now produced if at least one ``R_RISCV_JUMP_SLOT``
relocation references a symbol with the ``STO_RISCV_VARIANT_CC`` bit.
(`D107951 <https://reviews.llvm.org/D107951>`_)
+* ``DT_STATIC_TLS`` is now set for AArch64/PPC32/PPC64 initial-exec TLS models
+ when producing a shared object.
* ``--no-undefined-version`` is now the default; symbols named in version
scripts that have no matching symbol in the output will be reported. Use
``--undefined-version`` to revert to the old behavior.
+ (`D135402 <https://reviews.llvm.org/D135402>`_)
+* ``-V`` is now an alias for ``-v`` to support ``gcc -fuse-ld=lld -v`` on many targets.
+* ``-r`` no longer defines ``__global_pointer$`` or ``_TLS_MODULE_BASE_``.
+* A corner case of mixed GCC and Clang object files (``STB_WEAK`` and
+ ``STB_GNU_UNIQUE`` in different COMDATs) is now supported.
+ (`D136381 <https://reviews.llvm.org/D136381>`_)
* The output ``SHT_RISCV_ATTRIBUTES`` section now merges all input components
instead of picking the first input component.
(`D138550 <https://reviews.llvm.org/D138550>`_)
+* For x86-32, ``-fno-plt`` GD/LD TLS models ``call *___tls_get_addr@GOT(%reg)``
+  are now supported. Previously, the linked output could crash at runtime.
+* Armv4(T) thunks are now supported.
+ (`D139888 <https://reviews.llvm.org/D139888>`_)
+ (`D141272 <https://reviews.llvm.org/D141272>`_)
Breaking changes
----------------
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.cpp b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.cpp
index bd9ff99db67b..c5170757496f 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.cpp
@@ -8,7 +8,6 @@
#include "Coroutines.h"
-#include "Plugins/ExpressionParser/Clang/ClangASTImporter.h"
#include "Plugins/TypeSystem/Clang/TypeSystemClang.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/VariableList.h"
@@ -17,38 +16,37 @@ using namespace lldb;
using namespace lldb_private;
using namespace lldb_private::formatters;
-static ValueObjectSP GetCoroFramePtrFromHandle(ValueObject &valobj) {
- ValueObjectSP valobj_sp(valobj.GetNonSyntheticValue());
+static lldb::addr_t GetCoroFramePtrFromHandle(ValueObjectSP valobj_sp) {
if (!valobj_sp)
- return nullptr;
+ return LLDB_INVALID_ADDRESS;
// We expect a single pointer in the `coroutine_handle` class.
// We don't care about its name.
if (valobj_sp->GetNumChildren() != 1)
- return nullptr;
+ return LLDB_INVALID_ADDRESS;
ValueObjectSP ptr_sp(valobj_sp->GetChildAtIndex(0, true));
if (!ptr_sp)
- return nullptr;
+ return LLDB_INVALID_ADDRESS;
if (!ptr_sp->GetCompilerType().IsPointerType())
- return nullptr;
-
- return ptr_sp;
-}
-
-static Function *ExtractDestroyFunction(ValueObjectSP &frame_ptr_sp) {
- lldb::TargetSP target_sp = frame_ptr_sp->GetTargetSP();
- lldb::ProcessSP process_sp = frame_ptr_sp->GetProcessSP();
- auto ptr_size = process_sp->GetAddressByteSize();
+ return LLDB_INVALID_ADDRESS;
AddressType addr_type;
- lldb::addr_t frame_ptr_addr = frame_ptr_sp->GetPointerValue(&addr_type);
+ lldb::addr_t frame_ptr_addr = ptr_sp->GetPointerValue(&addr_type);
if (!frame_ptr_addr || frame_ptr_addr == LLDB_INVALID_ADDRESS)
- return nullptr;
+ return LLDB_INVALID_ADDRESS;
lldbassert(addr_type == AddressType::eAddressTypeLoad);
+ if (addr_type != AddressType::eAddressTypeLoad)
+ return LLDB_INVALID_ADDRESS;
+
+ return frame_ptr_addr;
+}
+
+static Function *ExtractDestroyFunction(lldb::TargetSP target_sp,
+ lldb::addr_t frame_ptr_addr) {
+ lldb::ProcessSP process_sp = target_sp->GetProcessSP();
+ auto ptr_size = process_sp->GetAddressByteSize();
Status error;
- // The destroy pointer is the 2nd pointer inside the compiler-generated
- // `pair<resumePtr,destroyPtr>`.
auto destroy_func_ptr_addr = frame_ptr_addr + ptr_size;
lldb::addr_t destroy_func_addr =
process_sp->ReadPointerFromMemory(destroy_func_ptr_addr, error);
@@ -59,12 +57,7 @@ static Function *ExtractDestroyFunction(ValueObjectSP &frame_ptr_sp) {
if (!target_sp->ResolveLoadAddress(destroy_func_addr, destroy_func_address))
return nullptr;
- Function *destroy_func =
- destroy_func_address.CalculateSymbolContextFunction();
- if (!destroy_func)
- return nullptr;
-
- return destroy_func;
+ return destroy_func_address.CalculateSymbolContextFunction();
}
static CompilerType InferPromiseType(Function &destroy_func) {
@@ -85,44 +78,25 @@ static CompilerType InferPromiseType(Function &destroy_func) {
return promise_type->GetForwardCompilerType();
}
-static CompilerType GetCoroutineFrameType(TypeSystemClang &ast_ctx,
- CompilerType promise_type) {
- CompilerType void_type = ast_ctx.GetBasicType(lldb::eBasicTypeVoid);
- CompilerType coro_func_type = ast_ctx.CreateFunctionType(
- /*result_type=*/void_type, /*args=*/&void_type, /*num_args=*/1,
- /*is_variadic=*/false, /*qualifiers=*/0);
- CompilerType coro_abi_type;
- if (promise_type.IsVoidType()) {
- coro_abi_type = ast_ctx.CreateStructForIdentifier(
- ConstString(), {{"resume", coro_func_type.GetPointerType()},
- {"destroy", coro_func_type.GetPointerType()}});
- } else {
- coro_abi_type = ast_ctx.CreateStructForIdentifier(
- ConstString(), {{"resume", coro_func_type.GetPointerType()},
- {"destroy", coro_func_type.GetPointerType()},
- {"promise", promise_type}});
- }
- return coro_abi_type;
-}
-
bool lldb_private::formatters::StdlibCoroutineHandleSummaryProvider(
ValueObject &valobj, Stream &stream, const TypeSummaryOptions &options) {
- ValueObjectSP ptr_sp(GetCoroFramePtrFromHandle(valobj));
- if (!ptr_sp)
+ lldb::addr_t frame_ptr_addr =
+ GetCoroFramePtrFromHandle(valobj.GetNonSyntheticValue());
+ if (frame_ptr_addr == LLDB_INVALID_ADDRESS)
return false;
- if (!ptr_sp->GetValueAsUnsigned(0)) {
+ if (frame_ptr_addr == 0) {
stream << "nullptr";
} else {
- stream.Printf("coro frame = 0x%" PRIx64, ptr_sp->GetValueAsUnsigned(0));
+ stream.Printf("coro frame = 0x%" PRIx64, frame_ptr_addr);
}
+
return true;
}
lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
StdlibCoroutineHandleSyntheticFrontEnd(lldb::ValueObjectSP valobj_sp)
- : SyntheticChildrenFrontEnd(*valobj_sp),
- m_ast_importer(std::make_unique<ClangASTImporter>()) {
+ : SyntheticChildrenFrontEnd(*valobj_sp) {
if (valobj_sp)
Update();
}
@@ -132,32 +106,61 @@ lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
size_t lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
CalculateNumChildren() {
- if (!m_frame_ptr_sp)
+ if (!m_resume_ptr_sp || !m_destroy_ptr_sp)
return 0;
- return m_frame_ptr_sp->GetNumChildren();
+ return m_promise_ptr_sp ? 3 : 2;
}
lldb::ValueObjectSP lldb_private::formatters::
StdlibCoroutineHandleSyntheticFrontEnd::GetChildAtIndex(size_t idx) {
- if (!m_frame_ptr_sp)
- return lldb::ValueObjectSP();
-
- return m_frame_ptr_sp->GetChildAtIndex(idx, true);
+ switch (idx) {
+ case 0:
+ return m_resume_ptr_sp;
+ case 1:
+ return m_destroy_ptr_sp;
+ case 2:
+ return m_promise_ptr_sp;
+ }
+ return lldb::ValueObjectSP();
}
bool lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
Update() {
- m_frame_ptr_sp.reset();
+ m_resume_ptr_sp.reset();
+ m_destroy_ptr_sp.reset();
+ m_promise_ptr_sp.reset();
- ValueObjectSP valobj_sp = m_backend.GetSP();
+ ValueObjectSP valobj_sp = m_backend.GetNonSyntheticValue();
if (!valobj_sp)
return false;
- ValueObjectSP ptr_sp(GetCoroFramePtrFromHandle(m_backend));
- if (!ptr_sp)
+ lldb::addr_t frame_ptr_addr = GetCoroFramePtrFromHandle(valobj_sp);
+ if (frame_ptr_addr == 0 || frame_ptr_addr == LLDB_INVALID_ADDRESS)
+ return false;
+
+ auto ts = valobj_sp->GetCompilerType().GetTypeSystem();
+ auto ast_ctx = ts.dyn_cast_or_null<TypeSystemClang>();
+ if (!ast_ctx)
return false;
+ // Create the `resume` and `destroy` children.
+ lldb::TargetSP target_sp = m_backend.GetTargetSP();
+ auto &exe_ctx = m_backend.GetExecutionContextRef();
+ lldb::ProcessSP process_sp = target_sp->GetProcessSP();
+ auto ptr_size = process_sp->GetAddressByteSize();
+ CompilerType void_type = ast_ctx->GetBasicType(lldb::eBasicTypeVoid);
+ CompilerType coro_func_type = ast_ctx->CreateFunctionType(
+ /*result_type=*/void_type, /*args=*/&void_type, /*num_args=*/1,
+ /*is_variadic=*/false, /*qualifiers=*/0);
+ CompilerType coro_func_ptr_type = coro_func_type.GetPointerType();
+ m_resume_ptr_sp = CreateValueObjectFromAddress(
+ "resume", frame_ptr_addr + 0 * ptr_size, exe_ctx, coro_func_ptr_type);
+ lldbassert(m_resume_ptr_sp);
+ m_destroy_ptr_sp = CreateValueObjectFromAddress(
+ "destroy", frame_ptr_addr + 1 * ptr_size, exe_ctx, coro_func_ptr_type);
+ lldbassert(m_destroy_ptr_sp);
+
// Get the `promise_type` from the template argument
CompilerType promise_type(
valobj_sp->GetCompilerType().GetTypeTemplateArgument(0));
@@ -165,23 +168,31 @@ bool lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
return false;
// Try to infer the promise_type if it was type-erased
- auto ts = valobj_sp->GetCompilerType().GetTypeSystem();
- auto ast_ctx = ts.dyn_cast_or_null<TypeSystemClang>();
- if (!ast_ctx)
- return false;
if (promise_type.IsVoidType()) {
- if (Function *destroy_func = ExtractDestroyFunction(ptr_sp)) {
+ if (Function *destroy_func =
+ ExtractDestroyFunction(target_sp, frame_ptr_addr)) {
if (CompilerType inferred_type = InferPromiseType(*destroy_func)) {
- // Copy the type over to the correct `TypeSystemClang` instance
- promise_type = m_ast_importer->CopyType(*ast_ctx, inferred_type);
+ promise_type = inferred_type;
}
}
}
- // Build the coroutine frame type
- CompilerType coro_frame_type = GetCoroutineFrameType(*ast_ctx, promise_type);
+ // If we don't know the promise type, we don't display the `promise` member.
+ // `CreateValueObjectFromAddress` below would fail for `void` types.
+ if (promise_type.IsVoidType()) {
+ return false;
+ }
- m_frame_ptr_sp = ptr_sp->Cast(coro_frame_type.GetPointerType());
+ // Add the `promise` member. We intentionally add `promise` as a pointer type
+ // instead of a value type, and don't automatically dereference this pointer.
+ // We do so to avoid potential very deep recursion in case there is a cycle
+ // formed between `std::coroutine_handle`s and their promises.
+ lldb::ValueObjectSP promise = CreateValueObjectFromAddress(
+ "promise", frame_ptr_addr + 2 * ptr_size, exe_ctx, promise_type);
+ Status error;
+ lldb::ValueObjectSP promisePtr = promise->AddressOf(error);
+ if (error.Success())
+ m_promise_ptr_sp = promisePtr->Clone(ConstString("promise"));
return false;
}
@@ -193,10 +204,17 @@ bool lldb_private::formatters::StdlibCoroutineHandleSyntheticFrontEnd::
size_t StdlibCoroutineHandleSyntheticFrontEnd::GetIndexOfChildWithName(
ConstString name) {
- if (!m_frame_ptr_sp)
+ if (!m_resume_ptr_sp || !m_destroy_ptr_sp)
return UINT32_MAX;
- return m_frame_ptr_sp->GetIndexOfChildWithName(name);
+ if (name == ConstString("resume"))
+ return 0;
+ if (name == ConstString("destroy"))
+ return 1;
+ if (name == ConstString("promise_ptr") && m_promise_ptr_sp)
+ return 2;
+
+ return UINT32_MAX;
}
SyntheticChildrenFrontEnd *
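
The rewritten formatter materializes resume, destroy and (when the promise type is known) promise children directly from the frame address, relying on the usual coroutine frame layout of two function pointers followed by the promise. A hedged sketch of that assumed layout (assumed_coro_frame is an illustrative name; the layout is a compiler ABI convention, not something the C++ standard guarantees):

#include <cstdint>

// What the formatter assumes lives behind a std::coroutine_handle:
template <class Promise>
struct assumed_coro_frame {
  void (*resume)(void*);   // read at frame_ptr_addr + 0 * ptr_size
  void (*destroy)(void*);  // read at frame_ptr_addr + 1 * ptr_size
  Promise promise;         // read at frame_ptr_addr + 2 * ptr_size
};

// ExtractDestroyFunction reads the second slot in the same way:
inline std::uintptr_t destroy_slot(std::uintptr_t frame_ptr_addr,
                                   unsigned ptr_size) {
  return frame_ptr_addr + 1 * ptr_size;
}
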
diff --git a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.h b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.h
index c052bd23f4ca..b26cc9ed6132 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Language/CPlusPlus/Coroutines.h
@@ -16,8 +16,6 @@
namespace lldb_private {
-class ClangASTImporter;
-
namespace formatters {
/// Summary provider for `std::coroutine_handle<T>` from libc++, libstdc++ and
@@ -47,8 +45,9 @@ public:
size_t GetIndexOfChildWithName(ConstString name) override;
private:
- lldb::ValueObjectSP m_frame_ptr_sp;
- std::unique_ptr<lldb_private::ClangASTImporter> m_ast_importer;
+ lldb::ValueObjectSP m_resume_ptr_sp;
+ lldb::ValueObjectSP m_destroy_ptr_sp;
+ lldb::ValueObjectSP m_promise_ptr_sp;
};
SyntheticChildrenFrontEnd *
diff --git a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp
index 9131367bf223..5b75738e070c 100644
--- a/contrib/llvm-project/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/ObjectFile/ELF/ObjectFileELF.cpp
@@ -114,13 +114,13 @@ public:
static unsigned RelocSymbol64(const ELFRelocation &rel);
- static unsigned RelocOffset32(const ELFRelocation &rel);
+ static elf_addr RelocOffset32(const ELFRelocation &rel);
- static unsigned RelocOffset64(const ELFRelocation &rel);
+ static elf_addr RelocOffset64(const ELFRelocation &rel);
- static unsigned RelocAddend32(const ELFRelocation &rel);
+ static elf_sxword RelocAddend32(const ELFRelocation &rel);
- static unsigned RelocAddend64(const ELFRelocation &rel);
+ static elf_sxword RelocAddend64(const ELFRelocation &rel);
bool IsRela() { return (reloc.is<ELFRela *>()); }
@@ -185,28 +185,28 @@ unsigned ELFRelocation::RelocSymbol64(const ELFRelocation &rel) {
return ELFRela::RelocSymbol64(*rel.reloc.get<ELFRela *>());
}
-unsigned ELFRelocation::RelocOffset32(const ELFRelocation &rel) {
+elf_addr ELFRelocation::RelocOffset32(const ELFRelocation &rel) {
if (rel.reloc.is<ELFRel *>())
return rel.reloc.get<ELFRel *>()->r_offset;
else
return rel.reloc.get<ELFRela *>()->r_offset;
}
-unsigned ELFRelocation::RelocOffset64(const ELFRelocation &rel) {
+elf_addr ELFRelocation::RelocOffset64(const ELFRelocation &rel) {
if (rel.reloc.is<ELFRel *>())
return rel.reloc.get<ELFRel *>()->r_offset;
else
return rel.reloc.get<ELFRela *>()->r_offset;
}
-unsigned ELFRelocation::RelocAddend32(const ELFRelocation &rel) {
+elf_sxword ELFRelocation::RelocAddend32(const ELFRelocation &rel) {
if (rel.reloc.is<ELFRel *>())
return 0;
else
return rel.reloc.get<ELFRela *>()->r_addend;
}
-unsigned ELFRelocation::RelocAddend64(const ELFRelocation &rel) {
+elf_sxword ELFRelocation::RelocAddend64(const ELFRelocation &rel) {
if (rel.reloc.is<ELFRel *>())
return 0;
else
@@ -2593,6 +2593,50 @@ ObjectFileELF::ParseTrampolineSymbols(Symtab *symbol_table, user_id_t start_id,
rel_data, symtab_data, strtab_data);
}
+static void ApplyELF64ABS64Relocation(Symtab *symtab, ELFRelocation &rel,
+ DataExtractor &debug_data,
+ Section *rel_section) {
+ Symbol *symbol = symtab->FindSymbolByID(ELFRelocation::RelocSymbol64(rel));
+ if (symbol) {
+ addr_t value = symbol->GetAddressRef().GetFileAddress();
+ DataBufferSP &data_buffer_sp = debug_data.GetSharedDataBuffer();
+ // ObjectFileELF creates a WritableDataBuffer in CreateInstance.
+ WritableDataBuffer *data_buffer =
+ llvm::cast<WritableDataBuffer>(data_buffer_sp.get());
+ uint64_t *dst = reinterpret_cast<uint64_t *>(
+ data_buffer->GetBytes() + rel_section->GetFileOffset() +
+ ELFRelocation::RelocOffset64(rel));
+ uint64_t val_offset = value + ELFRelocation::RelocAddend64(rel);
+ memcpy(dst, &val_offset, sizeof(uint64_t));
+ }
+}
+
+static void ApplyELF64ABS32Relocation(Symtab *symtab, ELFRelocation &rel,
+ DataExtractor &debug_data,
+ Section *rel_section, bool is_signed) {
+ Symbol *symbol = symtab->FindSymbolByID(ELFRelocation::RelocSymbol64(rel));
+ if (symbol) {
+ addr_t value = symbol->GetAddressRef().GetFileAddress();
+ value += ELFRelocation::RelocAddend32(rel);
+ if ((!is_signed && (value > UINT32_MAX)) ||
+ (is_signed &&
+ ((int64_t)value > INT32_MAX || (int64_t)value < INT32_MIN))) {
+ Log *log = GetLog(LLDBLog::Modules);
+ LLDB_LOGF(log, "Failed to apply debug info relocations");
+ return;
+ }
+ uint32_t truncated_addr = (value & 0xFFFFFFFF);
+ DataBufferSP &data_buffer_sp = debug_data.GetSharedDataBuffer();
+ // ObjectFileELF creates a WritableDataBuffer in CreateInstance.
+ WritableDataBuffer *data_buffer =
+ llvm::cast<WritableDataBuffer>(data_buffer_sp.get());
+ uint32_t *dst = reinterpret_cast<uint32_t *>(
+ data_buffer->GetBytes() + rel_section->GetFileOffset() +
+ ELFRelocation::RelocOffset32(rel));
+ memcpy(dst, &truncated_addr, sizeof(uint32_t));
+ }
+}
+
unsigned ObjectFileELF::ApplyRelocations(
Symtab *symtab, const ELFHeader *hdr, const ELFSectionHeader *rel_hdr,
const ELFSectionHeader *symtab_hdr, const ELFSectionHeader *debug_hdr,
@@ -2656,55 +2700,50 @@ unsigned ObjectFileELF::ApplyRelocations(
reloc_type(rel));
}
} else {
- switch (reloc_type(rel)) {
- case R_AARCH64_ABS64:
- case R_X86_64_64: {
- symbol = symtab->FindSymbolByID(reloc_symbol(rel));
- if (symbol) {
- addr_t value = symbol->GetAddressRef().GetFileAddress();
- DataBufferSP &data_buffer_sp = debug_data.GetSharedDataBuffer();
- // ObjectFileELF creates a WritableDataBuffer in CreateInstance.
- WritableDataBuffer *data_buffer =
- llvm::cast<WritableDataBuffer>(data_buffer_sp.get());
- uint64_t *dst = reinterpret_cast<uint64_t *>(
- data_buffer->GetBytes() + rel_section->GetFileOffset() +
- ELFRelocation::RelocOffset64(rel));
- uint64_t val_offset = value + ELFRelocation::RelocAddend64(rel);
- memcpy(dst, &val_offset, sizeof(uint64_t));
+ switch (hdr->e_machine) {
+ case llvm::ELF::EM_AARCH64:
+ switch (reloc_type(rel)) {
+ case R_AARCH64_ABS64:
+ ApplyELF64ABS64Relocation(symtab, rel, debug_data, rel_section);
+ break;
+ case R_AARCH64_ABS32:
+ ApplyELF64ABS32Relocation(symtab, rel, debug_data, rel_section, true);
+ break;
+ default:
+ assert(false && "unexpected relocation type");
}
break;
- }
- case R_X86_64_32:
- case R_X86_64_32S:
- case R_AARCH64_ABS32: {
- symbol = symtab->FindSymbolByID(reloc_symbol(rel));
- if (symbol) {
- addr_t value = symbol->GetAddressRef().GetFileAddress();
- value += ELFRelocation::RelocAddend32(rel);
- if ((reloc_type(rel) == R_X86_64_32 && (value > UINT32_MAX)) ||
- (reloc_type(rel) == R_X86_64_32S &&
- ((int64_t)value > INT32_MAX && (int64_t)value < INT32_MIN)) ||
- (reloc_type(rel) == R_AARCH64_ABS32 &&
- ((int64_t)value > INT32_MAX && (int64_t)value < INT32_MIN))) {
- Log *log = GetLog(LLDBLog::Modules);
- LLDB_LOGF(log, "Failed to apply debug info relocations");
- break;
- }
- uint32_t truncated_addr = (value & 0xFFFFFFFF);
- DataBufferSP &data_buffer_sp = debug_data.GetSharedDataBuffer();
- // ObjectFileELF creates a WritableDataBuffer in CreateInstance.
- WritableDataBuffer *data_buffer =
- llvm::cast<WritableDataBuffer>(data_buffer_sp.get());
- uint32_t *dst = reinterpret_cast<uint32_t *>(
- data_buffer->GetBytes() + rel_section->GetFileOffset() +
- ELFRelocation::RelocOffset32(rel));
- memcpy(dst, &truncated_addr, sizeof(uint32_t));
+ case llvm::ELF::EM_LOONGARCH:
+ switch (reloc_type(rel)) {
+ case R_LARCH_64:
+ ApplyELF64ABS64Relocation(symtab, rel, debug_data, rel_section);
+ break;
+ case R_LARCH_32:
+ ApplyELF64ABS32Relocation(symtab, rel, debug_data, rel_section, true);
+ break;
+ default:
+ assert(false && "unexpected relocation type");
+ }
+ break;
+ case llvm::ELF::EM_X86_64:
+ switch (reloc_type(rel)) {
+ case R_X86_64_64:
+ ApplyELF64ABS64Relocation(symtab, rel, debug_data, rel_section);
+ break;
+ case R_X86_64_32:
+ ApplyELF64ABS32Relocation(symtab, rel, debug_data, rel_section,
+ false);
+ break;
+ case R_X86_64_32S:
+ ApplyELF64ABS32Relocation(symtab, rel, debug_data, rel_section, true);
+ break;
+ case R_X86_64_PC32:
+ default:
+ assert(false && "unexpected relocation type");
}
break;
- }
- case R_X86_64_PC32:
default:
- assert(false && "unexpected relocation type");
+ assert(false && "unsupported machine");
}
}
}
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_mips64.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_mips64.cpp
index b8db7f2f3788..0349f13945e3 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_mips64.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_mips64.cpp
@@ -130,7 +130,7 @@ NativeRegisterContextFreeBSD_mips64::ReadRegister(const RegisterInfo *reg_info,
return error;
}
- RegSetKind set = opt_set.getValue();
+ RegSetKind set = *opt_set;
error = ReadRegisterSet(set);
if (error.Fail())
return error;
@@ -164,7 +164,7 @@ Status NativeRegisterContextFreeBSD_mips64::WriteRegister(
return error;
}
- RegSetKind set = opt_set.getValue();
+ RegSetKind set = *opt_set;
error = ReadRegisterSet(set);
if (error.Fail())
return error;
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_powerpc.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_powerpc.cpp
index 7ad9e3c209df..bdb57251f706 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_powerpc.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/FreeBSD/NativeRegisterContextFreeBSD_powerpc.cpp
@@ -181,7 +181,7 @@ NativeRegisterContextFreeBSD_powerpc::ReadRegister(const RegisterInfo *reg_info,
return error;
}
- RegSetKind set = opt_set.getValue();
+ RegSetKind set = *opt_set;
error = ReadRegisterSet(set);
if (error.Fail())
return error;
@@ -215,7 +215,7 @@ Status NativeRegisterContextFreeBSD_powerpc::WriteRegister(
return error;
}
- RegSetKind set = opt_set.getValue();
+ RegSetKind set = *opt_set;
error = ReadRegisterSet(set);
if (error.Fail())
return error;
diff --git a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
index c4bd0fa3e07c..51b67d1756b9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm-project/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -1297,10 +1297,17 @@ public:
bool loopIsFiniteByAssumption(const Loop *L);
class FoldID {
- SmallVector<unsigned, 4> Bits;
+ SmallVector<unsigned, 5> Bits;
public:
- void addInteger(unsigned long I) { Bits.push_back(I); }
+ void addInteger(unsigned long I) {
+ if (sizeof(long) == sizeof(int))
+ addInteger(unsigned(I));
+ else if (sizeof(long) == sizeof(long long))
+ addInteger((unsigned long long)I);
+ else
+ llvm_unreachable("unexpected sizeof(long)");
+ }
void addInteger(unsigned I) { Bits.push_back(I); }
void addInteger(int I) { Bits.push_back(I); }
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVElement.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVElement.h
index 2603c4542e8d..68dfefba3b3c 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVElement.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVElement.h
@@ -15,7 +15,6 @@
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
-#include "llvm/DebugInfo/LogicalView/Core/LVStringPool.h"
#include "llvm/Support/Casting.h"
#include <map>
#include <set>
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h
index 671ccf5d0e15..4c596b5b1dde 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVStringPool.h
@@ -71,11 +71,6 @@ public:
return (Index >= Entries.size()) ? StringRef() : Entries[Index]->getKey();
}
- static LVStringPool &getInstance() {
- static LVStringPool Instance;
- return Instance;
- }
-
void print(raw_ostream &OS) const {
if (!Entries.empty()) {
OS << "\nString Pool:\n";
@@ -90,8 +85,6 @@ public:
#endif
};
-inline LVStringPool &getStringPool() { return LVStringPool::getInstance(); }
-
} // namespace logicalview
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h
index bff1499c1a60..a2274ec1a62f 100644
--- a/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h
+++ b/contrib/llvm-project/llvm/include/llvm/DebugInfo/LogicalView/Core/LVSupport.h
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/DebugInfo/LogicalView/Core/LVStringPool.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Path.h"
@@ -27,6 +28,9 @@
namespace llvm {
namespace logicalview {
+// Returns the unique string pool instance.
+LVStringPool &getStringPool();
+
template <typename T>
using TypeIsValid = std::bool_constant<std::is_pointer<T>::value>;
diff --git a/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
index caece0e6fc19..ec7f5691dda4 100644
--- a/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
+++ b/contrib/llvm-project/llvm/include/llvm/Debuginfod/Debuginfod.h
@@ -38,9 +38,13 @@
namespace llvm {
+/// Returns false if a debuginfod lookup can be determined to have no chance of
+/// succeeding.
+bool canUseDebuginfod();
+
/// Finds default array of Debuginfod server URLs by checking DEBUGINFOD_URLS
/// environment variable.
-Expected<SmallVector<StringRef>> getDefaultDebuginfodUrls();
+SmallVector<StringRef> getDefaultDebuginfodUrls();
/// Finds a default local file caching directory for the debuginfod client,
/// first checking DEBUGINFOD_CACHE_PATH.
diff --git a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
index b1f85563195f..92a198befbe4 100644
--- a/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/contrib/llvm-project/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1391,6 +1391,16 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+class AdvSIMD_SVE_2SVBoolArg_Intrinsic
+ : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
+ [llvm_nxv16i1_ty],
+ [IntrNoMem]>;
+
+class AdvSIMD_SVE_3SVBoolArg_Intrinsic
+ : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
+ [llvm_nxv16i1_ty, llvm_nxv16i1_ty],
+ [IntrNoMem]>;
+
class AdvSIMD_SVE_Reduce_Intrinsic
: DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1836,22 +1846,43 @@ def int_aarch64_sve_sel : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lasta : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_lastb : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_rev : AdvSIMD_1VectorArg_Intrinsic;
+def int_aarch64_sve_rev_b16 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
+def int_aarch64_sve_rev_b32 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
+def int_aarch64_sve_rev_b64 : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_splice : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_tbl : AdvSIMD_SVE_TBL_Intrinsic;
def int_aarch64_sve_trn1 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_trn1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_trn1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_trn2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_trn2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uzp1 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_uzp1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_uzp1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_uzp2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_uzp2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip1_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_zip1_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_zip1_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip2_b16 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_zip2_b32 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
+def int_aarch64_sve_zip2_b64 : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1q : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2q : AdvSIMD_2VectorArg_Intrinsic;
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
index 4d48308d5509..bdb772862468 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h
@@ -21,6 +21,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Compiler.h"
@@ -42,6 +43,10 @@ namespace llvm {
class IndexedInstrProfReader;
+namespace object {
+class BuildIDFetcher;
+} // namespace object
+
namespace coverage {
class CoverageMappingReader;
@@ -579,6 +584,13 @@ class CoverageMapping {
ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
IndexedInstrProfReader &ProfileReader, CoverageMapping &Coverage);
+ // Load coverage records from file.
+ static Error
+ loadFromFile(StringRef Filename, StringRef Arch, StringRef CompilationDir,
+ IndexedInstrProfReader &ProfileReader, CoverageMapping &Coverage,
+ bool &DataFound,
+ SmallVectorImpl<object::BuildID> *FoundBinaryIDs = nullptr);
+
/// Add a function record corresponding to \p Record.
Error loadFunctionRecord(const CoverageMappingRecord &Record,
IndexedInstrProfReader &ProfileReader);
@@ -604,8 +616,8 @@ public:
/// Ignores non-instrumented object files unless all are not instrumented.
static Expected<std::unique_ptr<CoverageMapping>>
load(ArrayRef<StringRef> ObjectFilenames, StringRef ProfileFilename,
- ArrayRef<StringRef> Arches = std::nullopt,
- StringRef CompilationDir = "");
+ ArrayRef<StringRef> Arches = std::nullopt, StringRef CompilationDir = "",
+ const object::BuildIDFetcher *BIDFetcher = nullptr);
/// The number of functions that couldn't have their profiles mapped.
///
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
index 39c0045369be..326c1b0d3338 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/Coverage/CoverageMappingReader.h
@@ -205,7 +205,8 @@ public:
static Expected<std::vector<std::unique_ptr<BinaryCoverageReader>>>
create(MemoryBufferRef ObjectBuffer, StringRef Arch,
SmallVectorImpl<std::unique_ptr<MemoryBuffer>> &ObjectFileBuffers,
- StringRef CompilationDir = "");
+ StringRef CompilationDir = "",
+ SmallVectorImpl<object::BuildIDRef> *BinaryIDs = nullptr);
static Expected<std::unique_ptr<BinaryCoverageReader>>
createCoverageReaderFromBuffer(StringRef Coverage,
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/ExitCodes.h b/contrib/llvm-project/llvm/include/llvm/Support/ExitCodes.h
index b9041f5557d5..4eb5dedc688b 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/ExitCodes.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/ExitCodes.h
@@ -20,9 +20,9 @@
#if HAVE_SYSEXITS_H
#include <sysexits.h>
-#elif __MVS__
-// <sysexits.h> does not exist on z/OS. The only value used in LLVM is
-// EX_IOERR, which is used to signal a special error condition (broken pipe).
+#elif __MVS__ || defined(_WIN32)
+// <sysexits.h> does not exist on z/OS and Windows. The only value used in LLVM
+// is EX_IOERR, which is used to signal a special error condition (broken pipe).
// Define the macro with its usual value from BSD systems, which is chosen to
// not clash with more standard exit codes like 1.
#define EX_IOERR 74
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
index 9070b88d710e..219435979698 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/RISCVISAInfo.h
@@ -51,6 +51,12 @@ public:
bool ExperimentalExtensionVersionCheck = true,
bool IgnoreUnknown = false);
+ /// Parse RISCV ISA info from an arch string that is already in normalized
+ /// form (as defined in the psABI). Unlike parseArchString, this function
+ /// will not error for unrecognized extension names or extension versions.
+ static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
+ parseNormalizedArchString(StringRef Arch);
+
/// Parse RISCV ISA info from feature vector.
static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
parseFeatures(unsigned XLen, const std::vector<std::string> &Features);
diff --git a/contrib/llvm-project/llvm/include/llvm/Support/Signals.h b/contrib/llvm-project/llvm/include/llvm/Support/Signals.h
index 937e0572d4a7..70749ce30184 100644
--- a/contrib/llvm-project/llvm/include/llvm/Support/Signals.h
+++ b/contrib/llvm-project/llvm/include/llvm/Support/Signals.h
@@ -102,14 +102,17 @@ namespace sys {
/// functions. A null handler pointer disables the current installed
/// function. Note also that the handler may be executed on a
/// different thread on some platforms.
- ///
- /// This is a no-op on Windows.
void SetOneShotPipeSignalFunction(void (*Handler)());
- /// On Unix systems, this function exits with an "IO error" exit code.
- /// This is a no-op on Windows.
+ /// On Unix systems and Windows, this function exits with an "IO error" exit
+ /// code.
void DefaultOneShotPipeSignalHandler();
+#ifdef _WIN32
+ /// Windows does not support signals and this handler must be called manually.
+ void CallOneShotPipeSignalHandler();
+#endif
+
/// This function does the following:
/// - clean up any temporary files registered with RemoveFileOnSignal()
/// - dump the callstack from the exception context
diff --git a/contrib/llvm-project/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/contrib/llvm-project/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index da2ecd8c1339..f50576b8fee1 100644
--- a/contrib/llvm-project/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/contrib/llvm-project/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -18,6 +18,9 @@
#include <vector>
namespace llvm {
+
+class Triple;
+
namespace RISCV {
// We use 64 bits as the known part in the scalable vector types.
@@ -38,6 +41,8 @@ void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
bool getCPUFeaturesExceptStdExt(CPUKind Kind, std::vector<StringRef> &Features);
+bool isX18ReservedByDefault(const Triple &TT);
+
} // namespace RISCV
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/include/llvm/TargetParser/Triple.h b/contrib/llvm-project/llvm/include/llvm/TargetParser/Triple.h
index 8d600989c8cf..59513fa2f206 100644
--- a/contrib/llvm-project/llvm/include/llvm/TargetParser/Triple.h
+++ b/contrib/llvm-project/llvm/include/llvm/TargetParser/Triple.h
@@ -882,6 +882,14 @@ public:
return getArch() == Triple::ppc64 || getArch() == Triple::ppc64le;
}
+ /// Tests whether the target 64-bit PowerPC big endian ABI is ELFv2.
+ bool isPPC64ELFv2ABI() const {
+ return (getArch() == Triple::ppc64 &&
+ ((getOS() == Triple::FreeBSD &&
+ (getOSMajorVersion() >= 13 || getOSVersion().empty())) ||
+ getOS() == Triple::OpenBSD || isMusl()));
+ }
+
/// Tests whether the target is 32-bit RISC-V.
bool isRISCV32() const { return getArch() == Triple::riscv32; }
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
index bf08336663b6..73aee47bfef5 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/OpenMPOpt.h
@@ -37,13 +37,25 @@ KernelSet getDeviceKernels(Module &M);
/// OpenMP optimizations pass.
class OpenMPOptPass : public PassInfoMixin<OpenMPOptPass> {
public:
+ OpenMPOptPass() : LTOPhase(ThinOrFullLTOPhase::None) {}
+ OpenMPOptPass(ThinOrFullLTOPhase LTOPhase) : LTOPhase(LTOPhase) {}
+
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+private:
+ const ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None;
};
class OpenMPOptCGSCCPass : public PassInfoMixin<OpenMPOptCGSCCPass> {
public:
+ OpenMPOptCGSCCPass() : LTOPhase(ThinOrFullLTOPhase::None) {}
+ OpenMPOptCGSCCPass(ThinOrFullLTOPhase LTOPhase) : LTOPhase(LTOPhase) {}
+
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR);
+
+private:
+ const ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None;
};
} // end namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp b/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
index f1587cecf9fb..0e9fab667e6e 100644
--- a/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -1673,11 +1673,6 @@ ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
// TODO: Use non-local query?
CondVal =
getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU), PHI->getParent());
- } else if (!isSafeToSpeculativelyExecute(CurrI)) {
- // Stop walking if we hit a non-speculatable instruction. Even if the
- // result is only used under a specific condition, executing the
- // instruction itself may cause side effects or UB already.
- break;
}
if (CondVal && CondVal->isConstantRange())
CR = CR.intersectWith(CondVal->getConstantRange());
@@ -1685,7 +1680,13 @@ ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
// Only follow one-use chain, to allow direct intersection of conditions.
// If there are multiple uses, we would have to intersect with the union of
// all conditions at different uses.
- if (!CurrI->hasOneUse())
+ // Stop walking if we hit a non-speculatable instruction. Even if the
+ // result is only used under a specific condition, executing the
+ // instruction itself may cause side effects or UB already.
+ // This also disallows looking through phi nodes: If the phi node is part
+ // of a cycle, we might end up reasoning about values from different cycle
+ // iterations (PR60629).
+ if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
break;
CurrU = &*CurrI->use_begin();
}
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 8c126d20fc9a..0b1e32c87fc3 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2235,6 +2235,8 @@ bool AsmPrinter::doFinalization(Module &M) {
SmallVector<const GlobalAlias *, 16> AliasStack;
SmallPtrSet<const GlobalAlias *, 16> AliasVisited;
for (const auto &Alias : M.aliases()) {
+ if (Alias.hasAvailableExternallyLinkage())
+ continue;
for (const GlobalAlias *Cur = &Alias; Cur;
Cur = dyn_cast<GlobalAlias>(Cur->getAliasee())) {
if (!AliasVisited.insert(Cur).second)
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm-project/llvm/lib/CodeGen/IfConversion.cpp
index 105ab908d3fa..936de316e575 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/IfConversion.cpp
@@ -2244,6 +2244,15 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
assert(!FromMBB.hasAddressTaken() &&
"Removing a BB whose address is taken!");
+ // If we're about to splice an INLINEASM_BR from FromBBI, we need to update
+ // ToBBI's successor list accordingly.
+ if (FromMBB.mayHaveInlineAsmBr())
+ for (MachineInstr &MI : FromMBB)
+ if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
+ for (MachineOperand &MO : MI.operands())
+ if (MO.isMBB() && !ToBBI.BB->isSuccessor(MO.getMBB()))
+ ToBBI.BB->addSuccessor(MO.getMBB(), BranchProbability::getZero());
+
// In case FromMBB contains terminators (e.g. return instruction),
// first move the non-terminator instructions, then the terminators.
MachineBasicBlock::iterator FromTI = FromMBB.getFirstTerminator();
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0a3ebd73d272..d9cde609e599 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10447,7 +10447,8 @@ SDValue DAGCombiner::combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
if (NegRHS == False) {
SDValue Combined = combineMinNumMaxNumImpl(DL, VT, LHS, RHS, NegTrue,
False, CC, TLI, DAG);
- return DAG.getNode(ISD::FNEG, DL, VT, Combined);
+ if (Combined)
+ return DAG.getNode(ISD::FNEG, DL, VT, Combined);
}
}
}
@@ -12453,7 +12454,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG) {
SDValue N00 = N0.getOperand(0);
EVT ExtVT = cast<VTSDNode>(N0->getOperand(1))->getVT();
- if (N00.getOpcode() == ISD::TRUNCATE && (!LegalOperations || TLI.isTypeLegal(ExtVT))) {
+ if (N00.getOpcode() == ISD::TRUNCATE &&
+ (!LegalTypes || TLI.isTypeLegal(ExtVT))) {
SDValue T = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, N00.getOperand(0));
return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, T);
}
@@ -21359,10 +21361,9 @@ static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {
// the source vector. The high bits map to zero. We will use a zero vector
// as the 2nd source operand of the shuffle, so use the 1st element of
// that vector (mask value is number-of-elements) for the high bits.
- if (i % ZextRatio == 0)
- ShufMask[i] = Extract.getConstantOperandVal(1);
- else
- ShufMask[i] = NumMaskElts;
+ int Low = DAG.getDataLayout().isBigEndian() ? (ZextRatio - 1) : 0;
+ ShufMask[i] = (i % ZextRatio == Low) ? Extract.getConstantOperandVal(1)
+ : NumMaskElts;
}
// Undef elements of the build vector remain undef because we initialize
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
index b62374320d75..fbdff4a79741 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -724,7 +724,9 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
// with the Target-specific changes necessary.
MaxAtomicSizeInBitsSupported = 1024;
- MaxDivRemBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;
+ // Assume that even with libcalls, no target supports wider than 128 bit
+ // division.
+ MaxDivRemBitWidthSupported = 128;
MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/LogicalView/Core/LVSupport.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/LogicalView/Core/LVSupport.cpp
index 9fa1f28eb089..6d55b755ed46 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/LogicalView/Core/LVSupport.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/LogicalView/Core/LVSupport.cpp
@@ -20,6 +20,12 @@ using namespace llvm::logicalview;
#define DEBUG_TYPE "Support"
+namespace {
+// Unique string pool instance used by all logical readers.
+LVStringPool StringPool;
+} // namespace
+LVStringPool &llvm::logicalview::getStringPool() { return StringPool; }
+
// Perform the following transformations to the given 'Path':
// - all characters to lowercase.
// - '\\' into '/' (Platform independent).
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
index 2a07794c7ddf..5ac861b32cd9 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/Symbolize/Symbolize.cpp
@@ -655,22 +655,29 @@ namespace {
// vectorcall - foo@@12
// These are all different linkage names for 'foo'.
StringRef demanglePE32ExternCFunc(StringRef SymbolName) {
- // Remove any '_' or '@' prefix.
char Front = SymbolName.empty() ? '\0' : SymbolName[0];
- if (Front == '_' || Front == '@')
- SymbolName = SymbolName.drop_front();
// Remove any '@[0-9]+' suffix.
+ bool HasAtNumSuffix = false;
if (Front != '?') {
size_t AtPos = SymbolName.rfind('@');
if (AtPos != StringRef::npos &&
- all_of(drop_begin(SymbolName, AtPos + 1), isDigit))
+ all_of(drop_begin(SymbolName, AtPos + 1), isDigit)) {
SymbolName = SymbolName.substr(0, AtPos);
+ HasAtNumSuffix = true;
+ }
}
// Remove any ending '@' for vectorcall.
- if (SymbolName.endswith("@"))
+ bool IsVectorCall = false;
+ if (HasAtNumSuffix && SymbolName.endswith("@")) {
SymbolName = SymbolName.drop_back();
+ IsVectorCall = true;
+ }
+
+ // If not vectorcall, remove any '_' or '@' prefix.
+ if (!IsVectorCall && (Front == '_' || Front == '@'))
+ SymbolName = SymbolName.drop_front();
return SymbolName;
}
@@ -698,8 +705,14 @@ LLVMSymbolizer::DemangleName(const std::string &Name,
return Result;
}
- if (DbiModuleDescriptor && DbiModuleDescriptor->isWin32Module())
- return std::string(demanglePE32ExternCFunc(Name));
+ if (DbiModuleDescriptor && DbiModuleDescriptor->isWin32Module()) {
+ std::string DemangledCName(demanglePE32ExternCFunc(Name));
+ // On i386 Windows, the C name mangling for different calling conventions
+ // may also be applied on top of the Itanium or Rust name mangling.
+ if (nonMicrosoftDemangle(DemangledCName.c_str(), Result))
+ return Result;
+ return DemangledCName;
+ }
return Name;
}
diff --git a/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp b/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
index 026f118bbf5b..2b0710b536ba 100644
--- a/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
+++ b/contrib/llvm-project/llvm/lib/Debuginfod/Debuginfod.cpp
@@ -55,7 +55,11 @@ static std::string buildIDToString(BuildIDRef ID) {
return llvm::toHex(ID, /*LowerCase=*/true);
}
-Expected<SmallVector<StringRef>> getDefaultDebuginfodUrls() {
+bool canUseDebuginfod() {
+ return HTTPClient::isAvailable() && !getDefaultDebuginfodUrls().empty();
+}
+
+SmallVector<StringRef> getDefaultDebuginfodUrls() {
const char *DebuginfodUrlsEnv = std::getenv("DEBUGINFOD_URLS");
if (DebuginfodUrlsEnv == nullptr)
return SmallVector<StringRef>();
@@ -126,13 +130,8 @@ Expected<std::string> getCachedOrDownloadArtifact(StringRef UniqueKey,
return CacheDirOrErr.takeError();
CacheDir = *CacheDirOrErr;
- Expected<SmallVector<StringRef>> DebuginfodUrlsOrErr =
- getDefaultDebuginfodUrls();
- if (!DebuginfodUrlsOrErr)
- return DebuginfodUrlsOrErr.takeError();
- SmallVector<StringRef> &DebuginfodUrls = *DebuginfodUrlsOrErr;
return getCachedOrDownloadArtifact(UniqueKey, UrlPath, CacheDir,
- DebuginfodUrls,
+ getDefaultDebuginfodUrls(),
getDefaultDebuginfodTimeout());
}
@@ -159,7 +158,8 @@ public:
Error StreamedHTTPResponseHandler::handleBodyChunk(StringRef BodyChunk) {
if (!FileStream) {
- if (Client.responseCode() != 200)
+ unsigned Code = Client.responseCode();
+ if (Code && Code != 200)
return Error::success();
Expected<std::unique_ptr<CachedFileStream>> FileStreamOrError =
CreateStream();
@@ -259,7 +259,8 @@ Expected<std::string> getCachedOrDownloadArtifact(
if (Err)
return std::move(Err);
- if (Client.responseCode() != 200)
+ unsigned Code = Client.responseCode();
+ if (Code && Code != 200)
continue;
// Return the path to the artifact on disk.
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
index 567d5a4dd47a..0ad3b7235e87 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -552,6 +552,7 @@ void link_ELF_aarch64(std::unique_ptr<LinkGraph> G,
Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
".eh_frame", 8, aarch64::Pointer32, aarch64::Pointer64,
aarch64::Delta32, aarch64::Delta64, aarch64::NegDelta32));
+ Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
// Add a mark-live pass.
if (auto MarkLive = Ctx->getMarkLivePass(TT))
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp
index 95380d912392..3368d3276cb3 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/Orc/Layer.cpp
@@ -125,6 +125,10 @@ void IRMaterializationUnit::discard(const JITDylib &JD,
assert(!I->second->isDeclaration() &&
"Discard should only apply to definitions");
I->second->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ // According to the IR verifier, "Declaration[s] may not be in a Comdat!"
+ // Remove it, if this is a GlobalObject.
+ if (auto *GO = dyn_cast<GlobalObject>(I->second))
+ GO->setComdat(nullptr);
SymbolToDefinition.erase(I);
}
diff --git a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
index 7b9c55ff30a5..9342e10b5eda 100644
--- a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
@@ -26,6 +26,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
@@ -1125,6 +1126,40 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
break;
}
+ case 'w':
+ if (Name.startswith("wasm.fma.")) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::wasm_relaxed_madd, F->getReturnType());
+ return true;
+ }
+ if (Name.startswith("wasm.fms.")) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::wasm_relaxed_nmadd, F->getReturnType());
+ return true;
+ }
+ if (Name.startswith("wasm.laneselect.")) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::wasm_relaxed_laneselect,
+ F->getReturnType());
+ return true;
+ }
+ if (Name == "wasm.dot.i8x16.i7x16.signed") {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed);
+ return true;
+ }
+ if (Name == "wasm.dot.i8x16.i7x16.add.signed") {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed);
+ return true;
+ }
+ break;
+
case 'x':
if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
return true;
diff --git a/contrib/llvm-project/llvm/lib/IR/Verifier.cpp b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
index 83e42bc184ff..2d62d31cf6b4 100644
--- a/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
@@ -730,9 +730,6 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
GV.getName() == "llvm.global_dtors")) {
Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
"invalid linkage for intrinsic global variable", &GV);
- Check(GV.materialized_use_empty(),
- "invalid uses of intrinsic global variable", &GV);
-
// Don't worry about emitting an error for it not being an array,
// visitGlobalValue will complain on appending non-array.
if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
@@ -759,9 +756,6 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
GV.getName() == "llvm.compiler.used")) {
Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
"invalid linkage for intrinsic global variable", &GV);
- Check(GV.materialized_use_empty(),
- "invalid uses of intrinsic global variable", &GV);
-
Type *GVType = GV.getValueType();
if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
diff --git a/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp b/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp
index ebc57bd04be7..c6d536188391 100644
--- a/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp
+++ b/contrib/llvm-project/llvm/lib/Object/ELFObjectFile.cpp
@@ -303,12 +303,7 @@ Expected<SubtargetFeatures> ELFObjectFileBase::getRISCVFeatures() const {
std::optional<StringRef> Attr =
Attributes.getAttributeString(RISCVAttrs::ARCH);
if (Attr) {
- // Suppress version checking for experimental extensions to prevent erroring
- // when getting any unknown version of experimental extension.
- auto ParseResult = RISCVISAInfo::parseArchString(
- *Attr, /*EnableExperimentalExtension=*/true,
- /*ExperimentalExtensionVersionCheck=*/false,
- /*IgnoreUnknown=*/true);
+ auto ParseResult = RISCVISAInfo::parseNormalizedArchString(*Attr);
if (!ParseResult)
return ParseResult.takeError();
auto &ISAInfo = *ParseResult;
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
index 0762c535f7f5..0d074951cffc 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/contrib/llvm-project/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -1604,7 +1604,7 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
}
// Try to run OpenMP optimizations, quick no-op if no OpenMP metadata present.
- MPM.addPass(OpenMPOptPass());
+ MPM.addPass(OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink));
// Remove unused virtual tables to improve the quality of code generated by
// whole-program devirtualization and bitset lowering.
@@ -1685,6 +1685,9 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
// keep one copy of each constant.
MPM.addPass(ConstantMergePass());
+ // Remove unused arguments from functions.
+ MPM.addPass(DeadArgumentEliminationPass());
+
// Reduce the code after globalopt and ipsccp. Both can open up significant
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
@@ -1712,6 +1715,9 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
// Optimize globals again after we ran the inliner.
MPM.addPass(GlobalOptPass());
+ // Run the OpenMPOpt pass again after global optimizations.
+ MPM.addPass(OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink));
+
// Garbage collect dead functions.
MPM.addPass(GlobalDCEPass());
@@ -1719,9 +1725,6 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
// transform it to pass arguments by value instead of by reference.
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(ArgumentPromotionPass()));
- // Remove unused arguments from functions.
- MPM.addPass(DeadArgumentEliminationPass());
-
FunctionPassManager FPM;
// The IPO Passes may leave cruft around. Clean up after them.
FPM.addPass(InstCombinePass());
@@ -1808,7 +1811,8 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
addVectorPasses(Level, MainFPM, /* IsFullLTO */ true);
// Run the OpenMPOpt CGSCC pass again late.
- MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(OpenMPOptCGSCCPass()));
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
+ OpenMPOptCGSCCPass(ThinOrFullLTOPhase::FullLTOPostLink)));
invokePeepholeEPCallbacks(MainFPM, Level);
MainFPM.addPass(JumpThreadingPass());
diff --git a/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def b/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
index ad44d86ea1a7..10af4160c545 100644
--- a/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
+++ b/contrib/llvm-project/llvm/lib/Passes/PassRegistry.def
@@ -44,6 +44,7 @@ MODULE_PASS("always-inline", AlwaysInlinerPass())
MODULE_PASS("attributor", AttributorPass())
MODULE_PASS("annotation2metadata", Annotation2MetadataPass())
MODULE_PASS("openmp-opt", OpenMPOptPass())
+MODULE_PASS("openmp-opt-postlink", OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink))
MODULE_PASS("called-value-propagation", CalledValuePropagationPass())
MODULE_PASS("canonicalize-aliases", CanonicalizeAliasesPass())
MODULE_PASS("cg-profile", CGProfilePass())
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp b/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
index 6113f78aeb4e..ce71eebd4fd3 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMapping.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/Coverage/CoverageMappingReader.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/Debug.h"
@@ -342,10 +343,49 @@ static Error handleMaybeNoDataFoundError(Error E) {
});
}
+Error CoverageMapping::loadFromFile(
+ StringRef Filename, StringRef Arch, StringRef CompilationDir,
+ IndexedInstrProfReader &ProfileReader, CoverageMapping &Coverage,
+ bool &DataFound, SmallVectorImpl<object::BuildID> *FoundBinaryIDs) {
+ auto CovMappingBufOrErr = MemoryBuffer::getFileOrSTDIN(
+ Filename, /*IsText=*/false, /*RequiresNullTerminator=*/false);
+ if (std::error_code EC = CovMappingBufOrErr.getError())
+ return createFileError(Filename, errorCodeToError(EC));
+ MemoryBufferRef CovMappingBufRef =
+ CovMappingBufOrErr.get()->getMemBufferRef();
+ SmallVector<std::unique_ptr<MemoryBuffer>, 4> Buffers;
+
+ SmallVector<object::BuildIDRef> BinaryIDs;
+ auto CoverageReadersOrErr = BinaryCoverageReader::create(
+ CovMappingBufRef, Arch, Buffers, CompilationDir,
+ FoundBinaryIDs ? &BinaryIDs : nullptr);
+ if (Error E = CoverageReadersOrErr.takeError()) {
+ E = handleMaybeNoDataFoundError(std::move(E));
+ if (E)
+ return createFileError(Filename, std::move(E));
+ return E;
+ }
+
+ SmallVector<std::unique_ptr<CoverageMappingReader>, 4> Readers;
+ for (auto &Reader : CoverageReadersOrErr.get())
+ Readers.push_back(std::move(Reader));
+ if (FoundBinaryIDs && !Readers.empty()) {
+ llvm::append_range(*FoundBinaryIDs,
+ llvm::map_range(BinaryIDs, [](object::BuildIDRef BID) {
+ return object::BuildID(BID);
+ }));
+ }
+ DataFound |= !Readers.empty();
+ if (Error E = loadFromReaders(Readers, ProfileReader, Coverage))
+ return createFileError(Filename, std::move(E));
+ return Error::success();
+}
+
Expected<std::unique_ptr<CoverageMapping>>
CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames,
StringRef ProfileFilename, ArrayRef<StringRef> Arches,
- StringRef CompilationDir) {
+ StringRef CompilationDir,
+ const object::BuildIDFetcher *BIDFetcher) {
auto ProfileReaderOrErr = IndexedInstrProfReader::create(ProfileFilename);
if (Error E = ProfileReaderOrErr.takeError())
return createFileError(ProfileFilename, std::move(E));
@@ -353,35 +393,53 @@ CoverageMapping::load(ArrayRef<StringRef> ObjectFilenames,
auto Coverage = std::unique_ptr<CoverageMapping>(new CoverageMapping());
bool DataFound = false;
+ auto GetArch = [&](size_t Idx) {
+ if (Arches.empty())
+ return StringRef();
+ if (Arches.size() == 1)
+ return Arches.front();
+ return Arches[Idx];
+ };
+
+ SmallVector<object::BuildID> FoundBinaryIDs;
for (const auto &File : llvm::enumerate(ObjectFilenames)) {
- auto CovMappingBufOrErr = MemoryBuffer::getFileOrSTDIN(
- File.value(), /*IsText=*/false, /*RequiresNullTerminator=*/false);
- if (std::error_code EC = CovMappingBufOrErr.getError())
- return createFileError(File.value(), errorCodeToError(EC));
- StringRef Arch = Arches.empty() ? StringRef() : Arches[File.index()];
- MemoryBufferRef CovMappingBufRef =
- CovMappingBufOrErr.get()->getMemBufferRef();
- SmallVector<std::unique_ptr<MemoryBuffer>, 4> Buffers;
- auto CoverageReadersOrErr = BinaryCoverageReader::create(
- CovMappingBufRef, Arch, Buffers, CompilationDir);
- if (Error E = CoverageReadersOrErr.takeError()) {
- E = handleMaybeNoDataFoundError(std::move(E));
- if (E)
- return createFileError(File.value(), std::move(E));
- // E == success (originally a no_data_found error).
- continue;
+ if (Error E =
+ loadFromFile(File.value(), GetArch(File.index()), CompilationDir,
+ *ProfileReader, *Coverage, DataFound, &FoundBinaryIDs))
+ return std::move(E);
+ }
+
+ if (BIDFetcher) {
+ std::vector<object::BuildID> ProfileBinaryIDs;
+ if (Error E = ProfileReader->readBinaryIds(ProfileBinaryIDs))
+ return createFileError(ProfileFilename, std::move(E));
+
+ SmallVector<object::BuildIDRef> BinaryIDsToFetch;
+ if (!ProfileBinaryIDs.empty()) {
+ const auto &Compare = [](object::BuildIDRef A, object::BuildIDRef B) {
+ return std::lexicographical_compare(A.begin(), A.end(), B.begin(),
+ B.end());
+ };
+ llvm::sort(FoundBinaryIDs, Compare);
+ std::set_difference(
+ ProfileBinaryIDs.begin(), ProfileBinaryIDs.end(),
+ FoundBinaryIDs.begin(), FoundBinaryIDs.end(),
+ std::inserter(BinaryIDsToFetch, BinaryIDsToFetch.end()), Compare);
}
- SmallVector<std::unique_ptr<CoverageMappingReader>, 4> Readers;
- for (auto &Reader : CoverageReadersOrErr.get())
- Readers.push_back(std::move(Reader));
- DataFound |= !Readers.empty();
- if (Error E = loadFromReaders(Readers, *ProfileReader, *Coverage))
- return createFileError(File.value(), std::move(E));
+ for (object::BuildIDRef BinaryID : BinaryIDsToFetch) {
+ std::optional<std::string> PathOpt = BIDFetcher->fetch(BinaryID);
+ if (!PathOpt)
+ continue;
+ std::string Path = std::move(*PathOpt);
+ StringRef Arch = Arches.size() == 1 ? Arches.front() : StringRef();
+ if (Error E = loadFromFile(Path, Arch, CompilationDir, *ProfileReader,
+ *Coverage, DataFound))
+ return std::move(E);
+ }
}
- // If no readers were created, either no objects were provided or none of them
- // had coverage data. Return an error in the latter case.
- if (!DataFound && !ObjectFilenames.empty())
+
+ if (!DataFound)
return createFileError(
join(ObjectFilenames.begin(), ObjectFilenames.end(), ", "),
make_error<CoverageMapError>(coveragemap_error::no_data_found));
diff --git a/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
index 41962ab24ff9..d313864e2ddb 100644
--- a/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
+++ b/contrib/llvm-project/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp
@@ -954,7 +954,8 @@ static Expected<std::vector<SectionRef>> lookupSections(ObjectFile &OF,
static Expected<std::unique_ptr<BinaryCoverageReader>>
loadBinaryFormat(std::unique_ptr<Binary> Bin, StringRef Arch,
- StringRef CompilationDir = "") {
+ StringRef CompilationDir = "",
+ std::optional<object::BuildIDRef> *BinaryID = nullptr) {
std::unique_ptr<ObjectFile> OF;
if (auto *Universal = dyn_cast<MachOUniversalBinary>(Bin.get())) {
// If we have a universal binary, try to look up the object for the
@@ -1052,6 +1053,9 @@ loadBinaryFormat(std::unique_ptr<Binary> Bin, StringRef Arch,
FuncRecords = std::move(WritableBuffer);
}
+ if (BinaryID)
+ *BinaryID = getBuildID(OF.get());
+
return BinaryCoverageReader::createCoverageReaderFromBuffer(
CoverageMapping, std::move(FuncRecords), std::move(ProfileNames),
BytesInAddress, Endian, CompilationDir);
@@ -1074,7 +1078,7 @@ Expected<std::vector<std::unique_ptr<BinaryCoverageReader>>>
BinaryCoverageReader::create(
MemoryBufferRef ObjectBuffer, StringRef Arch,
SmallVectorImpl<std::unique_ptr<MemoryBuffer>> &ObjectFileBuffers,
- StringRef CompilationDir) {
+ StringRef CompilationDir, SmallVectorImpl<object::BuildIDRef> *BinaryIDs) {
std::vector<std::unique_ptr<BinaryCoverageReader>> Readers;
if (ObjectBuffer.getBuffer().startswith(TestingFormatMagic)) {
@@ -1114,7 +1118,7 @@ BinaryCoverageReader::create(
return BinaryCoverageReader::create(
ArchiveOrErr.get()->getMemoryBufferRef(), Arch, ObjectFileBuffers,
- CompilationDir);
+ CompilationDir, BinaryIDs);
}
}
@@ -1127,7 +1131,8 @@ BinaryCoverageReader::create(
return ChildBufOrErr.takeError();
auto ChildReadersOrErr = BinaryCoverageReader::create(
- ChildBufOrErr.get(), Arch, ObjectFileBuffers, CompilationDir);
+ ChildBufOrErr.get(), Arch, ObjectFileBuffers, CompilationDir,
+ BinaryIDs);
if (!ChildReadersOrErr)
return ChildReadersOrErr.takeError();
for (auto &Reader : ChildReadersOrErr.get())
@@ -1146,10 +1151,14 @@ BinaryCoverageReader::create(
return std::move(Readers);
}
- auto ReaderOrErr = loadBinaryFormat(std::move(Bin), Arch, CompilationDir);
+ std::optional<object::BuildIDRef> BinaryID;
+ auto ReaderOrErr = loadBinaryFormat(std::move(Bin), Arch, CompilationDir,
+ BinaryIDs ? &BinaryID : nullptr);
if (!ReaderOrErr)
return ReaderOrErr.takeError();
Readers.push_back(std::move(ReaderOrErr.get()));
+ if (BinaryID)
+ BinaryIDs->push_back(*BinaryID);
return std::move(Readers);
}
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_unix.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_unix.S
index 1da950cb4f7d..e05d3c5a99db 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_unix.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_unix.S
@@ -1,5 +1,7 @@
#if defined(__x86_64__)
+#include "llvm_blake3_prefix.h"
+
#if defined(__ELF__) && (defined(__linux__) || defined(__FreeBSD__))
.section .note.GNU-stack,"",%progbits
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_gnu.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_gnu.S
index bb58d2ae64b1..5ad1c641a7fc 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_gnu.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_gnu.S
@@ -1,3 +1,5 @@
+#include "llvm_blake3_prefix.h"
+
.intel_syntax noprefix
.global _blake3_hash_many_avx2
.global blake3_hash_many_avx2
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm
index 352298edd2e8..46bad1d98f38 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx2_x86-64_windows_msvc.asm
@@ -1,11 +1,11 @@
-public _blake3_hash_many_avx2
-public blake3_hash_many_avx2
+public _llvm_blake3_hash_many_avx2
+public llvm_blake3_hash_many_avx2
_TEXT SEGMENT ALIGN(16) 'CODE'
ALIGN 16
-blake3_hash_many_avx2 PROC
-_blake3_hash_many_avx2 PROC
+llvm_blake3_hash_many_avx2 PROC
+_llvm_blake3_hash_many_avx2 PROC
push r15
push r14
push r13
@@ -1785,8 +1785,8 @@ endroundloop1:
vmovdqu xmmword ptr [rbx+10H], xmm1
jmp unwind
-_blake3_hash_many_avx2 ENDP
-blake3_hash_many_avx2 ENDP
+_llvm_blake3_hash_many_avx2 ENDP
+llvm_blake3_hash_many_avx2 ENDP
_TEXT ENDS
_RDATA SEGMENT READONLY PAGE ALIAS(".rdata") 'CONST'
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_unix.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_unix.S
index df92148fd46c..a7931e81035e 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_unix.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_unix.S
@@ -1,5 +1,7 @@
#if defined(__x86_64__)
+#include "llvm_blake3_prefix.h"
+
#if defined(__ELF__) && (defined(__linux__) || defined(__FreeBSD__))
.section .note.GNU-stack,"",%progbits
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_gnu.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_gnu.S
index e10b9f36cbcc..53c586141fbe 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_gnu.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_gnu.S
@@ -1,3 +1,5 @@
+#include "llvm_blake3_prefix.h"
+
.intel_syntax noprefix
.global _blake3_hash_many_avx512
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm
index b19efbaaeb36..f13d1b260ab8 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_avx512_x86-64_windows_msvc.asm
@@ -1,15 +1,15 @@
-public _blake3_hash_many_avx512
-public blake3_hash_many_avx512
-public blake3_compress_in_place_avx512
-public _blake3_compress_in_place_avx512
-public blake3_compress_xof_avx512
-public _blake3_compress_xof_avx512
+public _llvm_blake3_hash_many_avx512
+public llvm_blake3_hash_many_avx512
+public llvm_blake3_compress_in_place_avx512
+public _llvm_blake3_compress_in_place_avx512
+public llvm_blake3_compress_xof_avx512
+public _llvm_blake3_compress_xof_avx512
_TEXT SEGMENT ALIGN(16) 'CODE'
ALIGN 16
-blake3_hash_many_avx512 PROC
-_blake3_hash_many_avx512 PROC
+llvm_blake3_hash_many_avx512 PROC
+_llvm_blake3_hash_many_avx512 PROC
push r15
push r14
push r13
@@ -2404,12 +2404,12 @@ endroundloop1:
vmovdqu xmmword ptr [rbx+10H], xmm1
jmp unwind
-_blake3_hash_many_avx512 ENDP
-blake3_hash_many_avx512 ENDP
+_llvm_blake3_hash_many_avx512 ENDP
+llvm_blake3_hash_many_avx512 ENDP
ALIGN 16
-blake3_compress_in_place_avx512 PROC
-_blake3_compress_in_place_avx512 PROC
+llvm_blake3_compress_in_place_avx512 PROC
+_llvm_blake3_compress_in_place_avx512 PROC
sub rsp, 72
vmovdqa xmmword ptr [rsp], xmm6
vmovdqa xmmword ptr [rsp+10H], xmm7
@@ -2498,12 +2498,12 @@ _blake3_compress_in_place_avx512 PROC
vmovdqa xmm9, xmmword ptr [rsp+30H]
add rsp, 72
ret
-_blake3_compress_in_place_avx512 ENDP
-blake3_compress_in_place_avx512 ENDP
+_llvm_blake3_compress_in_place_avx512 ENDP
+llvm_blake3_compress_in_place_avx512 ENDP
ALIGN 16
-blake3_compress_xof_avx512 PROC
-_blake3_compress_xof_avx512 PROC
+llvm_blake3_compress_xof_avx512 PROC
+_llvm_blake3_compress_xof_avx512 PROC
sub rsp, 72
vmovdqa xmmword ptr [rsp], xmm6
vmovdqa xmmword ptr [rsp+10H], xmm7
@@ -2597,8 +2597,8 @@ _blake3_compress_xof_avx512 PROC
vmovdqa xmm9, xmmword ptr [rsp+30H]
add rsp, 72
ret
-_blake3_compress_xof_avx512 ENDP
-blake3_compress_xof_avx512 ENDP
+_llvm_blake3_compress_xof_avx512 ENDP
+llvm_blake3_compress_xof_avx512 ENDP
_TEXT ENDS
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_impl.h b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_impl.h
index 180d0a6eeda8..8e5456d745cd 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_impl.h
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_impl.h
@@ -11,15 +11,7 @@
// For \p LLVM_LIBRARY_VISIBILITY
#include "llvm/Support/Compiler.h"
-// Remove the 'llvm_' prefix for the rest of the internal implementation.
-#define BLAKE3_VERSION_STRING LLVM_BLAKE3_VERSION_STRING
-#define BLAKE3_KEY_LEN LLVM_BLAKE3_KEY_LEN
-#define BLAKE3_OUT_LEN LLVM_BLAKE3_OUT_LEN
-#define BLAKE3_BLOCK_LEN LLVM_BLAKE3_BLOCK_LEN
-#define BLAKE3_CHUNK_LEN LLVM_BLAKE3_CHUNK_LEN
-#define BLAKE3_MAX_DEPTH LLVM_BLAKE3_MAX_DEPTH
-#define blake3_hasher llvm_blake3_hasher
-#define blake3_chunk_state llvm_blake3_chunk_state
+#include "llvm_blake3_prefix.h"
// internal flags
enum blake3_flags {
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_unix.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_unix.S
index e058faf0df2f..765cd31754b9 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_unix.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_unix.S
@@ -1,5 +1,7 @@
#if defined(__x86_64__)
+#include "llvm_blake3_prefix.h"
+
#if defined(__ELF__) && (defined(__linux__) || defined(__FreeBSD__))
.section .note.GNU-stack,"",%progbits
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_gnu.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_gnu.S
index 8852ba5976e1..bf3b4523a9f1 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_gnu.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_gnu.S
@@ -1,3 +1,5 @@
+#include "llvm_blake3_prefix.h"
+
.intel_syntax noprefix
.global blake3_hash_many_sse2
.global _blake3_hash_many_sse2
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm
index 507502f11a80..1069c8df4ed6 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse2_x86-64_windows_msvc.asm
@@ -1,15 +1,15 @@
-public _blake3_hash_many_sse2
-public blake3_hash_many_sse2
-public blake3_compress_in_place_sse2
-public _blake3_compress_in_place_sse2
-public blake3_compress_xof_sse2
-public _blake3_compress_xof_sse2
+public _llvm_blake3_hash_many_sse2
+public llvm_blake3_hash_many_sse2
+public llvm_blake3_compress_in_place_sse2
+public _llvm_blake3_compress_in_place_sse2
+public llvm_blake3_compress_xof_sse2
+public _llvm_blake3_compress_xof_sse2
_TEXT SEGMENT ALIGN(16) 'CODE'
ALIGN 16
-blake3_hash_many_sse2 PROC
-_blake3_hash_many_sse2 PROC
+llvm_blake3_hash_many_sse2 PROC
+_llvm_blake3_hash_many_sse2 PROC
push r15
push r14
push r13
@@ -2034,11 +2034,11 @@ endroundloop1:
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+10H], xmm1
jmp unwind
-_blake3_hash_many_sse2 ENDP
-blake3_hash_many_sse2 ENDP
+_llvm_blake3_hash_many_sse2 ENDP
+llvm_blake3_hash_many_sse2 ENDP
-blake3_compress_in_place_sse2 PROC
-_blake3_compress_in_place_sse2 PROC
+llvm_blake3_compress_in_place_sse2 PROC
+_llvm_blake3_compress_in_place_sse2 PROC
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+10H], xmm7
@@ -2164,12 +2164,12 @@ _blake3_compress_in_place_sse2 PROC
movdqa xmm15, xmmword ptr [rsp+60H]
add rsp, 120
ret
-_blake3_compress_in_place_sse2 ENDP
-blake3_compress_in_place_sse2 ENDP
+_llvm_blake3_compress_in_place_sse2 ENDP
+llvm_blake3_compress_in_place_sse2 ENDP
ALIGN 16
-blake3_compress_xof_sse2 PROC
-_blake3_compress_xof_sse2 PROC
+llvm_blake3_compress_xof_sse2 PROC
+_llvm_blake3_compress_xof_sse2 PROC
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+10H], xmm7
@@ -2302,8 +2302,8 @@ _blake3_compress_xof_sse2 PROC
movdqa xmm15, xmmword ptr [rsp+60H]
add rsp, 120
ret
-_blake3_compress_xof_sse2 ENDP
-blake3_compress_xof_sse2 ENDP
+_llvm_blake3_compress_xof_sse2 ENDP
+llvm_blake3_compress_xof_sse2 ENDP
_TEXT ENDS
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_unix.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_unix.S
index 2f75acf2bbce..70cc27452ede 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_unix.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_unix.S
@@ -1,5 +1,7 @@
#if defined(__x86_64__)
+#include "llvm_blake3_prefix.h"
+
#if defined(__ELF__) && (defined(__linux__) || defined(__FreeBSD__))
.section .note.GNU-stack,"",%progbits
#endif
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_gnu.S b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_gnu.S
index 60d0a4042e71..28bdf3890a29 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_gnu.S
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_gnu.S
@@ -1,3 +1,5 @@
+#include "llvm_blake3_prefix.h"
+
.intel_syntax noprefix
.global blake3_hash_many_sse41
.global _blake3_hash_many_sse41
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm
index 8966c7b84406..770935372cd9 100644
--- a/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/blake3_sse41_x86-64_windows_msvc.asm
@@ -1,15 +1,15 @@
-public _blake3_hash_many_sse41
-public blake3_hash_many_sse41
-public blake3_compress_in_place_sse41
-public _blake3_compress_in_place_sse41
-public blake3_compress_xof_sse41
-public _blake3_compress_xof_sse41
+public _llvm_blake3_hash_many_sse41
+public llvm_blake3_hash_many_sse41
+public llvm_blake3_compress_in_place_sse41
+public _llvm_blake3_compress_in_place_sse41
+public llvm_blake3_compress_xof_sse41
+public _llvm_blake3_compress_xof_sse41
_TEXT SEGMENT ALIGN(16) 'CODE'
ALIGN 16
-blake3_hash_many_sse41 PROC
-_blake3_hash_many_sse41 PROC
+llvm_blake3_hash_many_sse41 PROC
+_llvm_blake3_hash_many_sse41 PROC
push r15
push r14
push r13
@@ -1797,11 +1797,11 @@ endroundloop1:
movups xmmword ptr [rbx], xmm0
movups xmmword ptr [rbx+10H], xmm1
jmp unwind
-_blake3_hash_many_sse41 ENDP
-blake3_hash_many_sse41 ENDP
+_llvm_blake3_hash_many_sse41 ENDP
+llvm_blake3_hash_many_sse41 ENDP
-blake3_compress_in_place_sse41 PROC
-_blake3_compress_in_place_sse41 PROC
+llvm_blake3_compress_in_place_sse41 PROC
+_llvm_blake3_compress_in_place_sse41 PROC
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+10H], xmm7
@@ -1916,12 +1916,12 @@ _blake3_compress_in_place_sse41 PROC
movdqa xmm15, xmmword ptr [rsp+60H]
add rsp, 120
ret
-_blake3_compress_in_place_sse41 ENDP
-blake3_compress_in_place_sse41 ENDP
+_llvm_blake3_compress_in_place_sse41 ENDP
+llvm_blake3_compress_in_place_sse41 ENDP
ALIGN 16
-blake3_compress_xof_sse41 PROC
-_blake3_compress_xof_sse41 PROC
+llvm_blake3_compress_xof_sse41 PROC
+_llvm_blake3_compress_xof_sse41 PROC
sub rsp, 120
movdqa xmmword ptr [rsp], xmm6
movdqa xmmword ptr [rsp+10H], xmm7
@@ -2043,8 +2043,8 @@ _blake3_compress_xof_sse41 PROC
movdqa xmm15, xmmword ptr [rsp+60H]
add rsp, 120
ret
-_blake3_compress_xof_sse41 ENDP
-blake3_compress_xof_sse41 ENDP
+_llvm_blake3_compress_xof_sse41 ENDP
+llvm_blake3_compress_xof_sse41 ENDP
_TEXT ENDS
diff --git a/contrib/llvm-project/llvm/lib/Support/BLAKE3/llvm_blake3_prefix.h b/contrib/llvm-project/llvm/lib/Support/BLAKE3/llvm_blake3_prefix.h
new file mode 100644
index 000000000000..3cee3691e4cf
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Support/BLAKE3/llvm_blake3_prefix.h
@@ -0,0 +1,41 @@
+#ifndef LLVM_BLAKE3_PREFIX_H
+#define LLVM_BLAKE3_PREFIX_H
+
+#define BLAKE3_VERSION_STRING LLVM_BLAKE3_VERSION_STRING
+#define BLAKE3_KEY_LEN LLVM_BLAKE3_KEY_LEN
+#define BLAKE3_OUT_LEN LLVM_BLAKE3_OUT_LEN
+#define BLAKE3_BLOCK_LEN LLVM_BLAKE3_BLOCK_LEN
+#define BLAKE3_CHUNK_LEN LLVM_BLAKE3_CHUNK_LEN
+#define BLAKE3_MAX_DEPTH LLVM_BLAKE3_MAX_DEPTH
+#define blake3_hasher llvm_blake3_hasher
+#define blake3_chunk_state llvm_blake3_chunk_state
+#define blake3_compress_in_place llvm_blake3_compress_in_place
+#define blake3_compress_xof llvm_blake3_compress_xof
+#define blake3_hash_many llvm_blake3_hash_many
+#define blake3_simd_degree llvm_blake3_simd_degree
+#define blake3_compress_in_place_portable llvm_blake3_compress_in_place_portable
+#define blake3_compress_xof_portable llvm_blake3_compress_xof_portable
+#define blake3_hash_many_portable llvm_blake3_hash_many_portable
+#define blake3_compress_in_place_sse2 llvm_blake3_compress_in_place_sse2
+#define _blake3_compress_in_place_sse2 _llvm_blake3_compress_in_place_sse2
+#define blake3_compress_xof_sse2 llvm_blake3_compress_xof_sse2
+#define _blake3_compress_xof_sse2 _llvm_blake3_compress_xof_sse2
+#define blake3_hash_many_sse2 llvm_blake3_hash_many_sse2
+#define _blake3_hash_many_sse2 _llvm_blake3_hash_many_sse2
+#define blake3_compress_in_place_sse41 llvm_blake3_compress_in_place_sse41
+#define _blake3_compress_in_place_sse41 _llvm_blake3_compress_in_place_sse41
+#define blake3_compress_xof_sse41 llvm_blake3_compress_xof_sse41
+#define _blake3_compress_xof_sse41 _llvm_blake3_compress_xof_sse41
+#define blake3_hash_many_sse41 llvm_blake3_hash_many_sse41
+#define _blake3_hash_many_sse41 _llvm_blake3_hash_many_sse41
+#define blake3_hash_many_avx2 llvm_blake3_hash_many_avx2
+#define _blake3_hash_many_avx2 _llvm_blake3_hash_many_avx2
+#define blake3_compress_in_place_avx512 llvm_blake3_compress_in_place_avx512
+#define _blake3_compress_in_place_avx512 _llvm_blake3_compress_in_place_avx512
+#define blake3_compress_xof_avx512 llvm_blake3_compress_xof_avx512
+#define _blake3_compress_xof_avx512 _llvm_blake3_compress_xof_avx512
+#define blake3_hash_many_avx512 llvm_blake3_hash_many_avx512
+#define _blake3_hash_many_avx512 _llvm_blake3_hash_many_avx512
+#define blake3_hash_many_neon llvm_blake3_hash_many_neon
+
+#endif /* LLVM_BLAKE3_PREFIX_H */
diff --git a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
index 23ed9d813548..c256d256be4f 100644
--- a/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Parallel.cpp
@@ -214,8 +214,12 @@ void llvm::parallelFor(size_t Begin, size_t End,
Fn(I);
});
}
- for (; Begin != End; ++Begin)
- Fn(Begin);
+ if (Begin != End) {
+ TG.spawn([=, &Fn] {
+ for (size_t I = Begin; I != End; ++I)
+ Fn(I);
+ });
+ }
return;
}
#endif
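
The Parallel.cpp hunk above changes parallelFor so that the leftover tail of the range is spawned into the task group like every other chunk, instead of being executed inline on the calling thread. A rough standalone analogue of the resulting behaviour, using std::async in place of LLVM's task group (all names below are illustrative, not LLVM API):

#include <cstddef>
#include <functional>
#include <future>
#include <vector>

// Illustrative only: full chunks of [Begin, End) and the final partial chunk
// are all dispatched as asynchronous tasks; nothing runs inline on the caller
// (assumes ChunkSize > 0).
void parallelForSketch(size_t Begin, size_t End, size_t ChunkSize,
                       const std::function<void(size_t)> &Fn) {
  std::vector<std::future<void>> Tasks;
  size_t I = Begin;
  for (; I + ChunkSize <= End; I += ChunkSize) {
    size_t First = I, Last = I + ChunkSize;
    Tasks.push_back(std::async(std::launch::async, [First, Last, &Fn] {
      for (size_t J = First; J != Last; ++J)
        Fn(J);
    }));
  }
  if (I != End) {                           // tail: spawned, not run inline
    size_t First = I, Last = End;
    Tasks.push_back(std::async(std::launch::async, [First, Last, &Fn] {
      for (size_t J = First; J != Last; ++J)
        Fn(J);
    }));
  }
  for (auto &T : Tasks)                     // join all work before returning
    T.get();
}
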
diff --git a/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp b/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
index 1b1bff023d2f..7cb1147d4265 100644
--- a/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/RISCVISAInfo.cpp
@@ -503,6 +503,67 @@ RISCVISAInfo::parseFeatures(unsigned XLen,
}
llvm::Expected<std::unique_ptr<RISCVISAInfo>>
+RISCVISAInfo::parseNormalizedArchString(StringRef Arch) {
+ if (llvm::any_of(Arch, isupper)) {
+ return createStringError(errc::invalid_argument,
+ "string must be lowercase");
+ }
+ // Must start with a valid base ISA name.
+ unsigned XLen;
+ if (Arch.startswith("rv32i") || Arch.startswith("rv32e"))
+ XLen = 32;
+ else if (Arch.startswith("rv64i") || Arch.startswith("rv64e"))
+ XLen = 64;
+ else
+ return createStringError(errc::invalid_argument,
+ "arch string must begin with valid base ISA");
+ std::unique_ptr<RISCVISAInfo> ISAInfo(new RISCVISAInfo(XLen));
+ // Discard rv32/rv64 prefix.
+ Arch = Arch.substr(4);
+
+ // Each extension is of the form ${name}${major_version}p${minor_version}
+ // and separated by _. Split by _ and then extract the name and version
+ // information for each extension.
+ SmallVector<StringRef, 8> Split;
+ Arch.split(Split, '_');
+ for (StringRef Ext : Split) {
+ StringRef Prefix, MinorVersionStr;
+ std::tie(Prefix, MinorVersionStr) = Ext.rsplit('p');
+ if (MinorVersionStr.empty())
+ return createStringError(errc::invalid_argument,
+ "extension lacks version in expected format");
+ unsigned MajorVersion, MinorVersion;
+ if (MinorVersionStr.getAsInteger(10, MinorVersion))
+ return createStringError(errc::invalid_argument,
+ "failed to parse minor version number");
+
+ // Split Prefix into the extension name and the major version number
+ // (the trailing digits of Prefix).
+ int TrailingDigits = 0;
+ StringRef ExtName = Prefix;
+ while (!ExtName.empty()) {
+ if (!isDigit(ExtName.back()))
+ break;
+ ExtName = ExtName.drop_back(1);
+ TrailingDigits++;
+ }
+ if (!TrailingDigits)
+ return createStringError(errc::invalid_argument,
+ "extension lacks version in expected format");
+
+ StringRef MajorVersionStr = Prefix.take_back(TrailingDigits);
+ if (MajorVersionStr.getAsInteger(10, MajorVersion))
+ return createStringError(errc::invalid_argument,
+ "failed to parse major version number");
+ ISAInfo->addExtension(ExtName, MajorVersion, MinorVersion);
+ }
+ ISAInfo->updateFLen();
+ ISAInfo->updateMinVLen();
+ ISAInfo->updateMaxELen();
+ return std::move(ISAInfo);
+}
+
+llvm::Expected<std::unique_ptr<RISCVISAInfo>>
RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
bool ExperimentalExtensionVersionCheck,
bool IgnoreUnknown) {
@@ -999,6 +1060,8 @@ std::vector<std::string> RISCVISAInfo::toFeatureVector() const {
std::string ExtName = Ext.first;
if (ExtName == "i") // i is not recognized in clang -cc1
continue;
+ if (!isSupportedExtension(ExtName))
+ continue;
std::string Feature = isExperimentalExtension(ExtName)
? "+experimental-" + ExtName
: "+" + ExtName;
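
The new parseNormalizedArchString above accepts only already-normalized arch strings, where every extension token has the form ${name}${major_version}p${minor_version} and tokens are joined with '_'. A minimal standalone sketch of parsing one such token, mirroring the split-on-last-'p' and trailing-digits logic in the hunk (illustrative only, not the RISCVISAInfo implementation):

#include <cctype>
#include <optional>
#include <string>

struct ExtVersion {
  std::string Name;
  unsigned Major;
  unsigned Minor;
};

// Parse one "<name><major>p<minor>" token, e.g. "zba1p0" -> {"zba", 1, 0}.
std::optional<ExtVersion> parseExtToken(const std::string &Tok) {
  size_t P = Tok.rfind('p');
  if (P == std::string::npos || P + 1 == Tok.size())
    return std::nullopt;                       // no version in expected format
  std::string MinorStr = Tok.substr(P + 1);
  for (char C : MinorStr)
    if (!std::isdigit(static_cast<unsigned char>(C)))
      return std::nullopt;
  std::string Prefix = Tok.substr(0, P);
  size_t D = Prefix.size();                    // major = trailing digits of Prefix
  while (D > 0 && std::isdigit(static_cast<unsigned char>(Prefix[D - 1])))
    --D;
  if (D == Prefix.size() || D == 0)
    return std::nullopt;                       // missing version digits or name
  return ExtVersion{Prefix.substr(0, D),
                    static_cast<unsigned>(std::stoul(Prefix.substr(D))),
                    static_cast<unsigned>(std::stoul(MinorStr))};
}
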
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
index 92cf4fcda5a6..7106a67b576a 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
@@ -650,8 +650,6 @@ bool equivalent(file_status A, file_status B) {
return A.FileIndexHigh == B.FileIndexHigh &&
A.FileIndexLow == B.FileIndexLow && A.FileSizeHigh == B.FileSizeHigh &&
A.FileSizeLow == B.FileSizeLow &&
- A.LastAccessedTimeHigh == B.LastAccessedTimeHigh &&
- A.LastAccessedTimeLow == B.LastAccessedTimeLow &&
A.LastWriteTimeHigh == B.LastWriteTimeHigh &&
A.LastWriteTimeLow == B.LastWriteTimeLow &&
A.VolumeSerialNumber == B.VolumeSerialNumber;
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Signals.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Signals.inc
index ba93afe0803b..cb82f55fc38b 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Signals.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Signals.inc
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ExitCodes.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
@@ -204,6 +205,9 @@ static bool RegisteredUnhandledExceptionFilter = false;
static bool CleanupExecuted = false;
static PTOP_LEVEL_EXCEPTION_FILTER OldFilter = NULL;
+/// The function to call on "SIGPIPE" (one-time use only).
+static std::atomic<void (*)()> OneShotPipeSignalFunction(nullptr);
+
// Windows creates a new thread to execute the console handler when an event
// (such as CTRL/C) occurs. This causes concurrency issues with the above
// globals which this critical section addresses.
@@ -575,11 +579,16 @@ void llvm::sys::SetInfoSignalFunction(void (*Handler)()) {
}
void llvm::sys::SetOneShotPipeSignalFunction(void (*Handler)()) {
- // Unimplemented.
+ OneShotPipeSignalFunction.exchange(Handler);
}
void llvm::sys::DefaultOneShotPipeSignalHandler() {
- // Unimplemented.
+ llvm::sys::Process::Exit(EX_IOERR, /*NoCleanup=*/true);
+}
+
+void llvm::sys::CallOneShotPipeSignalHandler() {
+ if (auto OldOneShotPipeFunction = OneShotPipeSignalFunction.exchange(nullptr))
+ OldOneShotPipeFunction();
}
/// Add a function to be called when a signal is delivered to the process. The
@@ -816,7 +825,15 @@ WriteWindowsDumpFile(PMINIDUMP_EXCEPTION_INFORMATION ExceptionInfo) {
}
void sys::CleanupOnSignal(uintptr_t Context) {
- LLVMUnhandledExceptionFilter((LPEXCEPTION_POINTERS)Context);
+ LPEXCEPTION_POINTERS EP = (LPEXCEPTION_POINTERS)Context;
+ // Broken pipe is not a crash.
+ //
+ // 0xE0000000 is combined with the return code in the exception raised in
+ // CrashRecoveryContext::HandleExit().
+ unsigned RetCode = EP->ExceptionRecord->ExceptionCode;
+ if (RetCode == (0xE0000000 | EX_IOERR))
+ return;
+ LLVMUnhandledExceptionFilter(EP);
}
static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
diff --git a/contrib/llvm-project/llvm/lib/Support/Z3Solver.cpp b/contrib/llvm-project/llvm/lib/Support/Z3Solver.cpp
index a49bedcfd2b0..eb671fe2596d 100644
--- a/contrib/llvm-project/llvm/lib/Support/Z3Solver.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Z3Solver.cpp
@@ -729,7 +729,7 @@ public:
const Z3_sort Z3Sort = toZ3Sort(*getBitvectorSort(BitWidth)).Sort;
// Slow path, when 64 bits are not enough.
- if (LLVM_UNLIKELY(Int.getBitWidth() > 64u)) {
+ if (LLVM_UNLIKELY(!Int.isRepresentableByInt64())) {
SmallString<40> Buffer;
Int.toString(Buffer, 10);
return newExprRef(Z3Expr(
diff --git a/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
index 92b15f14c62f..7b9b8b2f53fb 100644
--- a/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/raw_ostream.cpp
@@ -56,6 +56,7 @@
#ifdef _WIN32
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Signals.h"
#include "llvm/Support/Windows/WindowsSupport.h"
#endif
@@ -775,6 +776,15 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
)
continue;
+#ifdef _WIN32
+ // Windows equivalents of SIGPIPE/EPIPE.
+ DWORD WinLastError = GetLastError();
+ if (WinLastError == ERROR_BROKEN_PIPE ||
+ (WinLastError == ERROR_NO_DATA && errno == EINVAL)) {
+ llvm::sys::CallOneShotPipeSignalHandler();
+ errno = EPIPE;
+ }
+#endif
// Otherwise it's a non-recoverable error. Note it and quit.
error_detected(std::error_code(errno, std::generic_category()));
break;
@@ -802,8 +812,6 @@ uint64_t raw_fd_ostream::seek(uint64_t off) {
flush();
#ifdef _WIN32
pos = ::_lseeki64(FD, off, SEEK_SET);
-#elif defined(HAVE_LSEEK64)
- pos = ::lseek64(FD, off, SEEK_SET);
#else
pos = ::lseek(FD, off, SEEK_SET);
#endif
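
Taken together, the Signals.inc and raw_ostream.cpp hunks above give Windows a broken-pipe path analogous to POSIX SIGPIPE: the registered handler is kept in an atomic function pointer, a failed write that maps to ERROR_BROKEN_PIPE invokes it at most once, and the default handler exits with EX_IOERR so CleanupOnSignal can treat that exit code as "not a crash". A minimal sketch of the one-shot handler pattern itself (standalone, illustrative names):

#include <atomic>
#include <cstdio>

// exchange() hands out the stored pointer at most once, even if several
// failing writes race to invoke the handler.
static std::atomic<void (*)()> OneShotPipeHandler{nullptr};

void setOneShotPipeHandler(void (*Fn)()) { OneShotPipeHandler.exchange(Fn); }

void callOneShotPipeHandler() {
  if (auto Fn = OneShotPipeHandler.exchange(nullptr))
    Fn();                              // runs exactly once per registration
}

int main() {
  setOneShotPipeHandler([] { std::puts("broken pipe"); });
  callOneShotPipeHandler();            // prints once
  callOneShotPipeHandler();            // no-op: pointer already consumed
}
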
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 6a42c4ff31dc..12dc30a2818b 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -839,7 +839,7 @@ let Predicates = [HasSVEorSME] in {
defm REVH_ZPmZ : sve_int_perm_rev_revh<"revh", AArch64revh_mt>;
defm REVW_ZPmZ : sve_int_perm_rev_revw<"revw", AArch64revw_mt>;
- defm REV_PP : sve_int_perm_reverse_p<"rev", vector_reverse>;
+ defm REV_PP : sve_int_perm_reverse_p<"rev", vector_reverse, int_aarch64_sve_rev_b16, int_aarch64_sve_rev_b32, int_aarch64_sve_rev_b64>;
defm REV_ZZ : sve_int_perm_reverse_z<"rev", vector_reverse>;
defm SUNPKLO_ZZ : sve_int_perm_unpk<0b00, "sunpklo", AArch64sunpklo>;
@@ -1672,12 +1672,12 @@ let Predicates = [HasSVEorSME] in {
defm TRN1_ZZZ : sve_int_perm_bin_perm_zz<0b100, "trn1", AArch64trn1>;
defm TRN2_ZZZ : sve_int_perm_bin_perm_zz<0b101, "trn2", AArch64trn2>;
- defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1", AArch64zip1>;
- defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2", AArch64zip2>;
- defm UZP1_PPP : sve_int_perm_bin_perm_pp<0b010, "uzp1", AArch64uzp1>;
- defm UZP2_PPP : sve_int_perm_bin_perm_pp<0b011, "uzp2", AArch64uzp2>;
- defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1", AArch64trn1>;
- defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2", AArch64trn2>;
+ defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1", AArch64zip1, int_aarch64_sve_zip1_b16, int_aarch64_sve_zip1_b32, int_aarch64_sve_zip1_b64>;
+ defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2", AArch64zip2, int_aarch64_sve_zip2_b16, int_aarch64_sve_zip2_b32, int_aarch64_sve_zip2_b64>;
+ defm UZP1_PPP : sve_int_perm_bin_perm_pp<0b010, "uzp1", AArch64uzp1, int_aarch64_sve_uzp1_b16, int_aarch64_sve_uzp1_b32, int_aarch64_sve_uzp1_b64>;
+ defm UZP2_PPP : sve_int_perm_bin_perm_pp<0b011, "uzp2", AArch64uzp2, int_aarch64_sve_uzp2_b16, int_aarch64_sve_uzp2_b32, int_aarch64_sve_uzp2_b64>;
+ defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1", AArch64trn1, int_aarch64_sve_trn1_b16, int_aarch64_sve_trn1_b32, int_aarch64_sve_trn1_b64>;
+ defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2", AArch64trn2, int_aarch64_sve_trn2_b16, int_aarch64_sve_trn2_b32, int_aarch64_sve_trn2_b64>;
// Extract lo/hi halves of legal predicate types.
def : Pat<(nxv1i1 (extract_subvector (nxv2i1 PPR:$Ps), (i64 0))),
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
index cef8d41218e8..1f0626191c0d 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -1448,11 +1448,12 @@ multiclass sve_int_perm_reverse_z<string asm, SDPatternOperator op> {
def : SVE_1_Op_Pat<nxv8bf16, op, nxv8bf16, !cast<Instruction>(NAME # _H)>;
}
-class sve_int_perm_reverse_p<bits<2> sz8_64, string asm, PPRRegOp pprty>
+class sve_int_perm_reverse_p<bits<2> sz8_64, string asm, PPRRegOp pprty,
+ SDPatternOperator op>
: I<(outs pprty:$Pd), (ins pprty:$Pn),
asm, "\t$Pd, $Pn",
"",
- []>, Sched<[]> {
+ [(set nxv16i1:$Pd, (op nxv16i1:$Pn))]>, Sched<[]> {
bits<4> Pd;
bits<4> Pn;
let Inst{31-24} = 0b00000101;
@@ -1463,16 +1464,18 @@ class sve_int_perm_reverse_p<bits<2> sz8_64, string asm, PPRRegOp pprty>
let Inst{3-0} = Pd;
}
-multiclass sve_int_perm_reverse_p<string asm, SDPatternOperator op> {
- def _B : sve_int_perm_reverse_p<0b00, asm, PPR8>;
- def _H : sve_int_perm_reverse_p<0b01, asm, PPR16>;
- def _S : sve_int_perm_reverse_p<0b10, asm, PPR32>;
- def _D : sve_int_perm_reverse_p<0b11, asm, PPR64>;
+multiclass sve_int_perm_reverse_p<string asm, SDPatternOperator ir_op,
+ SDPatternOperator op_b16,
+ SDPatternOperator op_b32,
+ SDPatternOperator op_b64> {
+ def _B : sve_int_perm_reverse_p<0b00, asm, PPR8, ir_op>;
+ def _H : sve_int_perm_reverse_p<0b01, asm, PPR16, op_b16>;
+ def _S : sve_int_perm_reverse_p<0b10, asm, PPR32, op_b32>;
+ def _D : sve_int_perm_reverse_p<0b11, asm, PPR64, op_b64>;
- def : SVE_1_Op_Pat<nxv16i1, op, nxv16i1, !cast<Instruction>(NAME # _B)>;
- def : SVE_1_Op_Pat<nxv8i1, op, nxv8i1, !cast<Instruction>(NAME # _H)>;
- def : SVE_1_Op_Pat<nxv4i1, op, nxv4i1, !cast<Instruction>(NAME # _S)>;
- def : SVE_1_Op_Pat<nxv2i1, op, nxv2i1, !cast<Instruction>(NAME # _D)>;
+ def : SVE_1_Op_Pat<nxv8i1, ir_op, nxv8i1, !cast<Instruction>(NAME # _H)>;
+ def : SVE_1_Op_Pat<nxv4i1, ir_op, nxv4i1, !cast<Instruction>(NAME # _S)>;
+ def : SVE_1_Op_Pat<nxv2i1, ir_op, nxv2i1, !cast<Instruction>(NAME # _D)>;
}
class sve_int_perm_unpk<bits<2> sz16_64, bits<2> opc, string asm,
@@ -6327,10 +6330,11 @@ multiclass sve_mem_p_spill<string asm> {
//===----------------------------------------------------------------------===//
class sve_int_perm_bin_perm_pp<bits<3> opc, bits<2> sz8_64, string asm,
- PPRRegOp pprty>
+ PPRRegOp pprty, SDPatternOperator op>
: I<(outs pprty:$Pd), (ins pprty:$Pn, pprty:$Pm),
asm, "\t$Pd, $Pn, $Pm",
- "", []>, Sched<[]> {
+ "",
+ [(set nxv16i1:$Pd, (op nxv16i1:$Pn, nxv16i1:$Pm))]>, Sched<[]> {
bits<4> Pd;
bits<4> Pm;
bits<4> Pn;
@@ -6347,16 +6351,18 @@ class sve_int_perm_bin_perm_pp<bits<3> opc, bits<2> sz8_64, string asm,
}
multiclass sve_int_perm_bin_perm_pp<bits<3> opc, string asm,
- SDPatternOperator op> {
- def _B : sve_int_perm_bin_perm_pp<opc, 0b00, asm, PPR8>;
- def _H : sve_int_perm_bin_perm_pp<opc, 0b01, asm, PPR16>;
- def _S : sve_int_perm_bin_perm_pp<opc, 0b10, asm, PPR32>;
- def _D : sve_int_perm_bin_perm_pp<opc, 0b11, asm, PPR64>;
-
- def : SVE_2_Op_Pat<nxv16i1, op, nxv16i1, nxv16i1, !cast<Instruction>(NAME # _B)>;
- def : SVE_2_Op_Pat<nxv8i1, op, nxv8i1, nxv8i1, !cast<Instruction>(NAME # _H)>;
- def : SVE_2_Op_Pat<nxv4i1, op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
- def : SVE_2_Op_Pat<nxv2i1, op, nxv2i1, nxv2i1, !cast<Instruction>(NAME # _D)>;
+ SDPatternOperator ir_op,
+ SDPatternOperator op_b16,
+ SDPatternOperator op_b32,
+ SDPatternOperator op_b64> {
+ def _B : sve_int_perm_bin_perm_pp<opc, 0b00, asm, PPR8, ir_op>;
+ def _H : sve_int_perm_bin_perm_pp<opc, 0b01, asm, PPR16, op_b16>;
+ def _S : sve_int_perm_bin_perm_pp<opc, 0b10, asm, PPR32, op_b32>;
+ def _D : sve_int_perm_bin_perm_pp<opc, 0b11, asm, PPR64, op_b64>;
+
+ def : SVE_2_Op_Pat<nxv8i1, ir_op, nxv8i1, nxv8i1, !cast<Instruction>(NAME # _H)>;
+ def : SVE_2_Op_Pat<nxv4i1, ir_op, nxv4i1, nxv4i1, !cast<Instruction>(NAME # _S)>;
+ def : SVE_2_Op_Pat<nxv2i1, ir_op, nxv2i1, nxv2i1, !cast<Instruction>(NAME # _D)>;
}
class sve_int_perm_punpk<bit opc, string asm>
diff --git a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 5fa7068c89eb..724705c25e3a 100644
--- a/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -357,6 +357,34 @@ static MachineBasicBlock::iterator insertSEH(MachineBasicBlock::iterator MBBI,
.setMIFlags(Flags);
break;
+ case ARM::t2STR_PRE:
+ if (MBBI->getOperand(0).getReg() == ARM::SP &&
+ MBBI->getOperand(2).getReg() == ARM::SP &&
+ MBBI->getOperand(3).getImm() == -4) {
+ unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
+ MIB = BuildMI(MF, DL, TII.get(ARM::SEH_SaveRegs))
+ .addImm(1ULL << Reg)
+ .addImm(/*Wide=*/1)
+ .setMIFlags(Flags);
+ } else {
+ report_fatal_error("No matching SEH Opcode for t2STR_PRE");
+ }
+ break;
+
+ case ARM::t2LDR_POST:
+ if (MBBI->getOperand(1).getReg() == ARM::SP &&
+ MBBI->getOperand(2).getReg() == ARM::SP &&
+ MBBI->getOperand(3).getImm() == 4) {
+ unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
+ MIB = BuildMI(MF, DL, TII.get(ARM::SEH_SaveRegs))
+ .addImm(1ULL << Reg)
+ .addImm(/*Wide=*/1)
+ .setMIFlags(Flags);
+ } else {
+ report_fatal_error("No matching SEH Opcode for t2LDR_POST");
+ }
+ break;
+
case ARM::t2LDMIA_RET:
case ARM::t2LDMIA_UPD:
case ARM::t2STMDB_UPD: {
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
index 9a873413db87..bde5f5db99e7 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.cpp
@@ -782,6 +782,17 @@ void BTFDebug::visitCompositeType(const DICompositeType *CTy,
visitEnumType(CTy, TypeId);
}
+bool BTFDebug::IsForwardDeclCandidate(const DIType *Base) {
+ if (const auto *CTy = dyn_cast<DICompositeType>(Base)) {
+ auto CTag = CTy->getTag();
+ if ((CTag == dwarf::DW_TAG_structure_type ||
+ CTag == dwarf::DW_TAG_union_type) &&
+ !CTy->getName().empty() && !CTy->isForwardDecl())
+ return true;
+ }
+ return false;
+}
+
/// Handle pointer, typedef, const, volatile, restrict and member types.
void BTFDebug::visitDerivedType(const DIDerivedType *DTy, uint32_t &TypeId,
bool CheckPointer, bool SeenPointer) {
@@ -796,20 +807,15 @@ void BTFDebug::visitDerivedType(const DIDerivedType *DTy, uint32_t &TypeId,
if (CheckPointer && SeenPointer) {
const DIType *Base = DTy->getBaseType();
if (Base) {
- if (const auto *CTy = dyn_cast<DICompositeType>(Base)) {
- auto CTag = CTy->getTag();
- if ((CTag == dwarf::DW_TAG_structure_type ||
- CTag == dwarf::DW_TAG_union_type) &&
- !CTy->getName().empty() && !CTy->isForwardDecl()) {
- /// Find a candidate, generate a fixup. Later on the struct/union
- /// pointee type will be replaced with either a real type or
- /// a forward declaration.
- auto TypeEntry = std::make_unique<BTFTypeDerived>(DTy, Tag, true);
- auto &Fixup = FixupDerivedTypes[CTy];
- Fixup.push_back(std::make_pair(DTy, TypeEntry.get()));
- TypeId = addType(std::move(TypeEntry), DTy);
- return;
- }
+ if (IsForwardDeclCandidate(Base)) {
+ /// Find a candidate, generate a fixup. Later on the struct/union
+ /// pointee type will be replaced with either a real type or
+ /// a forward declaration.
+ auto TypeEntry = std::make_unique<BTFTypeDerived>(DTy, Tag, true);
+ auto &Fixup = FixupDerivedTypes[cast<DICompositeType>(Base)];
+ Fixup.push_back(std::make_pair(DTy, TypeEntry.get()));
+ TypeId = addType(std::move(TypeEntry), DTy);
+ return;
}
}
}
@@ -844,6 +850,13 @@ void BTFDebug::visitDerivedType(const DIDerivedType *DTy, uint32_t &TypeId,
visitTypeEntry(DTy->getBaseType(), TempTypeId, CheckPointer, SeenPointer);
}
+/// Visit a type entry. CheckPointer is true if the type has
+/// one of its predecessors as one struct/union member. SeenPointer
+/// is true if CheckPointer is true and one of its predecessors
+/// is a pointer. The goal of CheckPointer and SeenPointer is to
+/// do pruning for struct/union types so some of these types
+/// will not be emitted in BTF and rather forward declarations
+/// will be generated.
void BTFDebug::visitTypeEntry(const DIType *Ty, uint32_t &TypeId,
bool CheckPointer, bool SeenPointer) {
if (!Ty || DIToIdMap.find(Ty) != DIToIdMap.end()) {
@@ -888,6 +901,11 @@ void BTFDebug::visitTypeEntry(const DIType *Ty, uint32_t &TypeId,
if (DIToIdMap.find(BaseTy) != DIToIdMap.end()) {
DTy = dyn_cast<DIDerivedType>(BaseTy);
} else {
+ if (CheckPointer && DTy->getTag() == dwarf::DW_TAG_pointer_type) {
+ SeenPointer = true;
+ if (IsForwardDeclCandidate(BaseTy))
+ break;
+ }
uint32_t TmpTypeId;
visitTypeEntry(BaseTy, TmpTypeId, CheckPointer, SeenPointer);
break;
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.h b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.h
index aa982babd458..f0b42232f4d5 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.h
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BTFDebug.h
@@ -338,6 +338,9 @@ class BTFDebug : public DebugHandlerBase {
void visitMapDefType(const DIType *Ty, uint32_t &TypeId);
/// @}
+ /// Check whether the type is a forward declaration candidate or not.
+ bool IsForwardDeclCandidate(const DIType *Base);
+
/// Get the file content for the subprogram. Certain lines of the file
/// later may be put into string table and referenced by line info.
std::string populateFileContent(const DISubprogram *SP);
diff --git a/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index 248bd3130c25..9a2cf94edfe6 100644
--- a/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -71,7 +71,7 @@ static cl::opt<bool> ErrorMissingParenthesis(
static cl::opt<bool> WarnSignedMismatch(
"mwarn-sign-mismatch",
cl::desc("Warn for mismatching a signed and unsigned value"),
- cl::init(true));
+ cl::init(false));
static cl::opt<bool> WarnNoncontigiousRegister(
"mwarn-noncontigious-register",
cl::desc("Warn for register names that arent contigious"), cl::init(true));
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index 98ced3559041..b17e2766a7a1 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -237,8 +237,7 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
case Triple::ppc64le:
return PPCTargetMachine::PPC_ABI_ELFv2;
case Triple::ppc64:
- if (TT.isOSFreeBSD() &&
- (TT.getOSMajorVersion() == 0 || TT.getOSMajorVersion() >= 13))
+ if (TT.isPPC64ELFv2ABI())
return PPCTargetMachine::PPC_ABI_ELFv2;
else
return PPCTargetMachine::PPC_ABI_ELFv1;
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
index 379aaa713a00..88b926fce2aa 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
@@ -239,6 +239,16 @@ bool RISCVELFStreamer::requiresFixups(MCContext &C, const MCExpr *Value,
if (B.isInSection() && B.getSection().getKind().isText())
return true;
+ // If A is undefined and B is defined, we should emit ADD/SUB for A-B.
+ // Unfortunately, A may be defined later, but this requiresFixups call has to
+ // eagerly make a decision. For now, emit ADD/SUB unless A is .L*. This
+ // heuristic handles many temporary label differences for .debug_* and
+ // .apple_types sections.
+ //
+ // TODO Implement delayed relocation decision.
+ if (!A.isInSection() && !A.isTemporary() && B.isInSection())
+ return true;
+
// Support cross-section symbolic differences ...
return A.isInSection() && B.isInSection() &&
A.getSection().getName() != B.getSection().getName();
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a8720d070acb..a1dc6a0cd2c1 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4540,6 +4540,9 @@ SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
assert(N->getOffset() == 0 && "unexpected offset in global node");
+ if (DAG.getTarget().useEmulatedTLS())
+ return LowerToTLSEmulatedModel(N, DAG);
+
TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
if (DAG.getMachineFunction().getFunction().getCallingConv() ==
@@ -14211,6 +14214,25 @@ bool RISCVTargetLowering::preferScalarizeSplat(unsigned Opc) const {
return true;
}
+static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ Function *ThreadPointerFunc =
+ Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ return IRB.CreatePointerCast(
+ IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
+ IRB.CreateCall(ThreadPointerFunc), Offset),
+ IRB.getInt8PtrTy()->getPointerTo(0));
+}
+
+Value *RISCVTargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
+ // Fuchsia provides a fixed TLS slot for the stack cookie.
+ // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
+ if (Subtarget.isTargetFuchsia())
+ return useTpOffset(IRB, -0x10);
+
+ return TargetLowering::getIRStackGuard(IRB);
+}
+
#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
index acf92cab3598..5086e6c0bdaf 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -618,6 +618,10 @@ public:
return Scale == 1;
}
+ /// If the target has a standard location for the stack protector cookie,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ Value *getIRStackGuard(IRBuilderBase &IRB) const override;
+
private:
/// RISCVCCAssignFn - This target-specific function extends the default
/// CCValAssign with additional information used to lower RISC-V calling
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
index 2ee228d72825..a26a3f2411f8 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
@@ -175,8 +175,9 @@ static bool isSignExtendedW(Register SrcReg, const MachineRegisterInfo &MRI,
const AttributeSet &Attrs = CalleeFn->getAttributes().getRetAttrs();
unsigned BitWidth = IntTy->getBitWidth();
- return (BitWidth <= 32 && Attrs.hasAttribute(Attribute::SExt)) ||
- (BitWidth < 32 && Attrs.hasAttribute(Attribute::ZExt));
+ if ((BitWidth <= 32 && Attrs.hasAttribute(Attribute::SExt)) ||
+ (BitWidth < 32 && Attrs.hasAttribute(Attribute::ZExt)))
+ continue;
}
if (!AddRegDefToWorkList(CopySrcReg))
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index c935dad1687f..d0bb4634d03d 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -83,6 +83,9 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) {
+ if (RISCV::isX18ReservedByDefault(TT))
+ UserReservedRegister.set(RISCV::X18);
+
CallLoweringInfo.reset(new RISCVCallLowering(*getTargetLowering()));
Legalizer.reset(new RISCVLegalizerInfo(*this));
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 678d218a6719..290c7b03ea81 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -176,6 +176,8 @@ public:
const LegalizerInfo *getLegalizerInfo() const override;
const RegisterBankInfo *getRegBankInfo() const override;
+ bool isTargetFuchsia() const { return getTargetTriple().isOSFuchsia(); }
+
bool useConstantPoolForLargeInts() const;
// Maximum cost used for building integers, integers will be put into constant
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
index fb22ddd91ba0..14c0e276a11b 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/MCTargetDesc/SparcInstPrinter.cpp
@@ -178,6 +178,8 @@ void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
default: break;
case SP::FBCOND:
case SP::FBCONDA:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
case SP::BPFCC:
case SP::BPFCCA:
case SP::BPFCCNT:
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 63f662c41f93..a3a09a36f1dd 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -28,6 +28,10 @@ using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "SparcGenInstrInfo.inc"
+static cl::opt<unsigned> BPccDisplacementBits(
+ "sparc-bpcc-offset-bits", cl::Hidden, cl::init(19),
+ cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)"));
+
// Pin the vtable to this file.
void SparcInstrInfo::anchor() {}
@@ -73,11 +77,6 @@ unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
return 0;
}
-static bool IsIntegerCC(unsigned CC)
-{
- return (CC <= SPCC::ICC_VC);
-}
-
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
switch(CC) {
@@ -155,9 +154,7 @@ static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
llvm_unreachable("Invalid cond code");
}
-static bool isUncondBranchOpcode(int Opc) {
- return Opc == SP::BA || Opc == SP::BPA;
-}
+static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
static bool isI32CondBranchOpcode(int Opc) {
return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
@@ -169,7 +166,10 @@ static bool isI64CondBranchOpcode(int Opc) {
Opc == SP::BPXCCANT;
}
-static bool isFCondBranchOpcode(int Opc) { return Opc == SP::FBCOND; }
+static bool isFCondBranchOpcode(int Opc) {
+ return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 ||
+ Opc == SP::FBCONDA_V9;
+}
static bool isCondBranchOpcode(int Opc) {
return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
@@ -193,6 +193,34 @@ static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
Target = LastInst->getOperand(0).getMBB();
}
+MachineBasicBlock *
+SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected opcode!");
+ case SP::BA:
+ case SP::BCOND:
+ case SP::BCONDA:
+ case SP::FBCOND:
+ case SP::FBCONDA:
+ case SP::BPICC:
+ case SP::BPICCA:
+ case SP::BPICCNT:
+ case SP::BPICCANT:
+ case SP::BPXCC:
+ case SP::BPXCCA:
+ case SP::BPXCCNT:
+ case SP::BPXCCANT:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
+ return MI.getOperand(0).getMBB();
+ }
+}
+
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -285,36 +313,37 @@ unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() <= 2) &&
"Sparc branch conditions should have at most two components!");
- assert(!BytesAdded && "code size not handled");
if (Cond.empty()) {
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
+ if (BytesAdded)
+ *BytesAdded = 8;
return 1;
}
// Conditional branch
unsigned Opc = Cond[0].getImm();
unsigned CC = Cond[1].getImm();
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
- if (IsIntegerCC(CC)) {
- BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
- } else {
- BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
- }
- if (!FBB)
+ if (!FBB) {
+ if (BytesAdded)
+ *BytesAdded = 8;
return 1;
+ }
- BuildMI(&MBB, DL, get(Subtarget.isV9() ? SP::BPA : SP::BA)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
+ if (BytesAdded)
+ *BytesAdded = 16;
return 2;
}
unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
- assert(!BytesRemoved && "code size not handled");
-
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
+ int Removed = 0;
while (I != MBB.begin()) {
--I;
@@ -325,10 +354,14 @@ unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
!isUncondBranchOpcode(I->getOpcode()))
break; // Not a branch
+ Removed += getInstSizeInBytes(*I);
I->eraseFromParent();
I = MBB.end();
++Count;
}
+
+ if (BytesRemoved)
+ *BytesRemoved = Removed;
return Count;
}
@@ -340,6 +373,37 @@ bool SparcInstrInfo::reverseBranchCondition(
return false;
}
+bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
+ int64_t Offset) const {
+ assert((Offset & 0b11) == 0 && "Malformed branch offset");
+ switch (BranchOpc) {
+ case SP::BA:
+ case SP::BCOND:
+ case SP::BCONDA:
+ case SP::FBCOND:
+ case SP::FBCONDA:
+ return isIntN(22, Offset >> 2);
+
+ case SP::BPICC:
+ case SP::BPICCA:
+ case SP::BPICCNT:
+ case SP::BPICCANT:
+ case SP::BPXCC:
+ case SP::BPXCCA:
+ case SP::BPXCCNT:
+ case SP::BPXCCANT:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::FBCOND_V9:
+ case SP::FBCONDA_V9:
+ return isIntN(BPccDisplacementBits, Offset >> 2);
+ }
+
+ llvm_unreachable("Unknown branch instruction!");
+}
+
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
@@ -530,6 +594,23 @@ Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
return GlobalBaseReg;
}
+unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
+
+ if (MI.isInlineAsm()) {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const char *AsmStr = MI.getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+
+ // If the instruction has a delay slot, be conservative and also include
+ // it for sizing purposes. This is done so that the BranchRelaxation pass
+ // will not mistakenly mark out-of-range branches as in-range.
+ if (MI.hasDelaySlot())
+ return get(Opcode).getSize() * 2;
+ return get(Opcode).getSize();
+}
+
bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
switch (MI.getOpcode()) {
case TargetOpcode::LOAD_STACK_GUARD: {
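
The branch-relaxation support added above needs two things from SparcInstrInfo: a conservative size for every instruction (a branch with a delay slot is counted as two words) and a per-opcode range check on the word-aligned displacement, 22 signed bits for Bicc/FBfcc and 19 by default for the v9 BPcc/FBPfcc forms. A small standalone sketch of that displacement test (illustrative only):

#include <cstdint>

// True if Value fits in a signed immediate field of the given width.
static bool fitsSignedBits(int64_t Value, unsigned Bits) {
  int64_t Lo = -(int64_t(1) << (Bits - 1));
  int64_t Hi = (int64_t(1) << (Bits - 1)) - 1;
  return Value >= Lo && Value <= Hi;
}

// ByteOffset must be word aligned; the encoded field holds ByteOffset >> 2.
static bool branchOffsetInRangeSketch(int64_t ByteOffset, bool IsV9Predicted) {
  const unsigned Bits = IsV9Predicted ? 19 : 22;
  return (ByteOffset & 0b11) == 0 && fitsSignedBits(ByteOffset >> 2, Bits);
}
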
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.h
index 39cf791c2173..7056d6babe17 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -64,6 +64,8 @@ public:
unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
+ MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;
+
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
@@ -80,6 +82,9 @@ public:
bool
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+ /// Determine if the branch target is in range.
+ bool isBranchOffsetInRange(unsigned BranchOpc, int64_t Offset) const override;
+
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
bool KillSrc) const override;
@@ -99,6 +104,10 @@ public:
Register getGlobalBaseReg(MachineFunction *MF) const;
+ /// GetInstSize - Return the number of bytes of code the specified
+ /// instruction may be. This returns the maximum number of bytes.
+ unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+
// Lower pseudo instructions after register allocation.
bool expandPostRAPseudo(MachineInstr &MI) const override;
};
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 2c45a7218d04..2e95bc10337a 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -850,15 +850,8 @@ class BranchPredictAlways<dag ins, string asmstr, list<dag> pattern>
: F2_3<0b001, 0, 1, (outs), ins, asmstr, pattern>;
}
-let cond = 8 in {
- // If we're compiling for v9, prefer BPA rather than BA
- // TODO: Disallow BA emission when FeatureV8Deprecated isn't enabled
- let Predicates = [HasV9], cc = 0b00 in
- def BPA : BranchPredictAlways<(ins bprtarget:$imm19),
- "ba %icc, $imm19", [(br bb:$imm19)]>;
-
+let cond = 8 in
def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
-}
let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
index 58faaafc29d6..1dbe5c563359 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -32,6 +32,10 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeSparcTarget() {
initializeSparcDAGToDAGISelPass(PR);
}
+static cl::opt<bool>
+ BranchRelaxation("sparc-enable-branch-relax", cl::Hidden, cl::init(true),
+ cl::desc("Relax out of range conditional branches"));
+
static std::string computeDataLayout(const Triple &T, bool is64Bit) {
// Sparc is typically big endian, but some are little.
std::string Ret = T.getArch() == Triple::sparcel ? "e" : "E";
@@ -182,6 +186,9 @@ bool SparcPassConfig::addInstSelector() {
}
void SparcPassConfig::addPreEmitPass(){
+ if (BranchRelaxation)
+ addPass(&BranchRelaxationPassID);
+
addPass(createSparcDelaySlotFillerPass());
if (this->getSparcTargetMachine().getSubtargetImpl()->insertNOPLoad())
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86.td b/contrib/llvm-project/llvm/lib/Target/X86/X86.td
index 83bd2ff6acc3..55c56e76af6f 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86.td
@@ -695,6 +695,7 @@ include "X86ScheduleSLM.td"
include "X86ScheduleZnver1.td"
include "X86ScheduleZnver2.td"
include "X86ScheduleZnver3.td"
+include "X86ScheduleZnver4.td"
include "X86ScheduleBdVer2.td"
include "X86ScheduleBtVer2.td"
include "X86SchedSkylakeClient.td"
@@ -1627,7 +1628,7 @@ def : ProcModel<"znver2", Znver2Model, ProcessorFeatures.ZN2Features,
ProcessorFeatures.ZN2Tuning>;
def : ProcModel<"znver3", Znver3Model, ProcessorFeatures.ZN3Features,
ProcessorFeatures.ZN3Tuning>;
-def : Proc<"znver4",ProcessorFeatures.ZN4Features,
+def : ProcModel<"znver4", Znver4Model, ProcessorFeatures.ZN4Features,
ProcessorFeatures.ZN4Tuning>;
def : Proc<"geode", [FeatureX87, FeatureCX8, Feature3DNowA],
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td
index c92a30804014..4dd8a6cdd898 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td
@@ -1154,11 +1154,11 @@ def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;
// CSRs that are handled explicitly via copies.
def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;
-// All GPRs - except r11 and return registers.
+// All GPRs - except r11
def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
R8, R9, R10)>;
-// All registers - except r11 and return registers.
+// All registers - except r11
def CSR_64_RT_AllRegs : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
(sequence "XMM%u", 0, 15))>;
def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index d69e2c3ed493..181de6abb2c5 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -4020,7 +4020,10 @@ bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
EVT OpVT = ShiftAmt.getValueType();
- NewShiftAmt = CurDAG->getNOT(DL, Add0C == nullptr ? Add0 : Add1, OpVT);
+ SDValue AllOnes = CurDAG->getAllOnesConstant(DL, OpVT);
+ NewShiftAmt = CurDAG->getNode(ISD::XOR, DL, OpVT,
+ Add0C == nullptr ? Add0 : Add1, AllOnes);
+ insertDAGNode(*CurDAG, OrigShiftAmt, AllOnes);
insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
// If we are shifting by N-X where N == 0 mod Size, then just shift by
// -X to generate a NEG instead of a SUB of a constant.
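
The tryShiftAmountMod change above stops calling CurDAG->getNOT and instead builds the equivalent XOR with an explicit all-ones constant, so that constant can be inserted into the DAG alongside the new node. The rewrite relies on the usual bitwise identity, shown here for a fixed-width unsigned integer (standalone check, not DAG code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0x12345678u;
  assert(~X == (X ^ 0xFFFFFFFFu));   // NOT(x) == XOR(x, all-ones)
  return 0;
}
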
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
index a33ee63c877e..cf17c51b04fc 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -104,20 +104,6 @@ static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
-/// Returns true if a CC can dynamically exclude a register from the list of
-/// callee-saved-registers (TargetRegistryInfo::getCalleeSavedRegs()) based on
-/// params/returns.
-static bool shouldDisableCalleeSavedRegisterCC(CallingConv::ID CC) {
- switch (CC) {
- default:
- return false;
- case CallingConv::X86_RegCall:
- case CallingConv::PreserveMost:
- case CallingConv::PreserveAll:
- return true;
- }
-}
-
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
const X86Subtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
@@ -3181,7 +3167,7 @@ X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// In some cases we need to disable registers from the default CSR list.
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
- shouldDisableCalleeSavedRegisterCC(CallConv) ||
+ CallConv == CallingConv::X86_RegCall ||
MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
@@ -4333,7 +4319,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
}
}
- if (shouldDisableCalleeSavedRegisterCC(CallConv) ||
+ if (CallConv == CallingConv::X86_RegCall ||
F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<Register, Register> Pair : MRI.liveins())
@@ -4894,7 +4880,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// In some calling conventions we need to remove the used physical registers
// from the reg mask.
- if (shouldDisableCalleeSavedRegisterCC(CallConv) || HasNCSR) {
+ if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// Allocate a new Reg Mask and copy Mask.
@@ -22000,15 +21986,25 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// Extend everything to 80 bits to force it to be done on x87.
// TODO: Are there any fast-math-flags to propagate here?
if (IsStrict) {
- SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
- {Chain, Fild, Fudge});
+ unsigned Opc = ISD::STRICT_FADD;
+ // Windows needs the precision control changed to 80bits around this add.
+ if (Subtarget.isOSWindows() && DstVT == MVT::f32)
+ Opc = X86ISD::STRICT_FP80_ADD;
+
+ SDValue Add =
+ DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
// STRICT_FP_ROUND can't handle equal types.
if (DstVT == MVT::f80)
return Add;
return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
{Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
}
- SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
+ unsigned Opc = ISD::FADD;
+ // Windows needs the precision control changed to 80bits around this add.
+ if (Subtarget.isOSWindows() && DstVT == MVT::f32)
+ Opc = X86ISD::FP80_ADD;
+
+ SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
}
@@ -34804,6 +34800,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(AESDECWIDE256KL)
NODE_NAME_CASE(CMPCCXADD)
NODE_NAME_CASE(TESTUI)
+ NODE_NAME_CASE(FP80_ADD)
+ NODE_NAME_CASE(STRICT_FP80_ADD)
}
return nullptr;
#undef NODE_NAME_CASE
@@ -37314,6 +37312,69 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return BB;
}
+ case X86::FP80_ADDr:
+ case X86::FP80_ADDm32: {
+ // Change the floating point control register to use double extended
+ // precision when performing the addition.
+ int OrigCWFrameIdx =
+ MF->getFrameInfo().CreateStackObject(2, Align(2), false);
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FNSTCW16m)),
+ OrigCWFrameIdx);
+
+ // Load the old value of the control word...
+ Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
+ OrigCWFrameIdx);
+
+ // OR 0b11 into bit 8 and 9. 0b11 is the encoding for double extended
+ // precision.
+ Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
+ BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
+ .addReg(OldCW, RegState::Kill)
+ .addImm(0x300);
+
+ // Extract to 16 bits.
+ Register NewCW16 =
+ MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
+ BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
+ .addReg(NewCW, RegState::Kill, X86::sub_16bit);
+
+ // Prepare memory for FLDCW.
+ int NewCWFrameIdx =
+ MF->getFrameInfo().CreateStackObject(2, Align(2), false);
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
+ NewCWFrameIdx)
+ .addReg(NewCW16, RegState::Kill);
+
+ // Reload the modified control word now...
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
+ NewCWFrameIdx);
+
+ // Do the addition.
+ if (MI.getOpcode() == X86::FP80_ADDr) {
+ BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80))
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(1))
+ .add(MI.getOperand(2));
+ } else {
+ BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80m32))
+ .add(MI.getOperand(0))
+ .add(MI.getOperand(1))
+ .add(MI.getOperand(2))
+ .add(MI.getOperand(3))
+ .add(MI.getOperand(4))
+ .add(MI.getOperand(5))
+ .add(MI.getOperand(6));
+ }
+
+ // Reload the original control word now.
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
+ OrigCWFrameIdx);
+
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+ }
+
case X86::FP32_TO_INT16_IN_MEM:
case X86::FP32_TO_INT32_IN_MEM:
case X86::FP32_TO_INT64_IN_MEM:
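
The FP80_ADDr/FP80_ADDm32 expansion above spills the x87 control word, forces the precision-control field to double extended, performs the 80-bit add, and then restores the original control word. Bits 8-9 of the control word select precision, and OR-ing in 0x300 sets them to 0b11 (64-bit significand); a plain-integer sketch of just that step (illustrative only):

#include <cstdint>

uint16_t widenToExtendedPrecision(uint16_t OldCW) {
  return static_cast<uint16_t>(OldCW | 0x300);   // set PC field (bits 8-9) to 0b11
}
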
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
index c5c115047271..d802d5f53aa2 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.h
@@ -740,6 +740,9 @@ namespace llvm {
// User level interrupts - testui
TESTUI,
+ // Perform an FP80 add after changing precision control in FPCW.
+ FP80_ADD,
+
/// X86 strict FP compare instructions.
STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
STRICT_FCMPS,
@@ -779,6 +782,9 @@ namespace llvm {
STRICT_CVTPS2PH,
STRICT_CVTPH2PS,
+ // Perform an FP80 add after changing precision control in FPCW.
+ STRICT_FP80_ADD,
+
// WARNING: Only add nodes here if they are strict FP nodes. Non-memory and
// non-strict FP nodes should be above FIRST_TARGET_STRICTFP_OPCODE.
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrCompiler.td b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrCompiler.td
index 8fddd0037999..7e1c96a429eb 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -896,15 +896,15 @@ multiclass ATOMIC_LOGIC_OP<Format Form, string s> {
multiclass ATOMIC_LOGIC_OP_RM<bits<8> Opc8, string s> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteBitTestSetRegRMW] in {
- def 16rm : Ii8<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
+ def 16rm : I<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
!strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR16:$src2))]>,
OpSize16, TB, LOCK;
- def 32rm : Ii8<Opc8, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
+ def 32rm : I<Opc8, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
!strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR32:$src2))]>,
OpSize32, TB, LOCK;
- def 64rm : RIi8<Opc8, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ def 64rm : RI<Opc8, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
!strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
[(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR64:$src2))]>,
TB, LOCK;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrFPStack.td
index a68d61043c5c..fbbd3c83dc5c 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -26,6 +26,13 @@ def SDTX86Fist : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86CwdLoad : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+def X86fp80_add : SDNode<"X86ISD::FP80_ADD", SDTFPBinOp, [SDNPCommutative]>;
+def X86strict_fp80_add : SDNode<"X86ISD::STRICT_FP80_ADD", SDTFPBinOp,
+ [SDNPHasChain,SDNPCommutative]>;
+def any_X86fp80_add : PatFrags<(ops node:$lhs, node:$rhs),
+ [(X86strict_fp80_add node:$lhs, node:$rhs),
+ (X86fp80_add node:$lhs, node:$rhs)]>;
+
def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst,
@@ -141,6 +148,14 @@ let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Defs = [EFLAGS] in {
[(X86fp_to_i32mem RFP80:$src, addr:$dst)]>;
def FP80_TO_INT64_IN_MEM : PseudoI<(outs), (ins i64mem:$dst, RFP80:$src),
[(X86fp_to_i64mem RFP80:$src, addr:$dst)]>;
+
+ def FP80_ADDr : PseudoI<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2),
+ [(set RFP80:$dst,
+ (any_X86fp80_add RFP80:$src1, RFP80:$src2))]>;
+ def FP80_ADDm32 : PseudoI<(outs RFP80:$dst), (ins RFP80:$src1, f32mem:$src2),
+ [(set RFP80:$dst,
+ (any_X86fp80_add RFP80:$src1,
+ (f80 (extloadf32 addr:$src2))))]>;
}
// All FP Stack operations are represented with four instructions here. The
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrSSE.td
index 561ba99db4af..f8660a9fa123 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86InstrSSE.td
@@ -577,20 +577,37 @@ let Predicates = [HasAVX, NoVLX] in {
def : Pat<(alignedloadv8f16 addr:$src),
(VMOVAPSrm addr:$src)>;
+ def : Pat<(alignedloadv8bf16 addr:$src),
+ (VMOVAPSrm addr:$src)>;
def : Pat<(loadv8f16 addr:$src),
(VMOVUPSrm addr:$src)>;
+ def : Pat<(loadv8bf16 addr:$src),
+ (VMOVUPSrm addr:$src)>;
def : Pat<(alignedstore (v8f16 VR128:$src), addr:$dst),
(VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v8bf16 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (v8f16 VR128:$src), addr:$dst),
(VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8bf16 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+
def : Pat<(alignedloadv16f16 addr:$src),
(VMOVAPSYrm addr:$src)>;
+ def : Pat<(alignedloadv16bf16 addr:$src),
+ (VMOVAPSYrm addr:$src)>;
def : Pat<(loadv16f16 addr:$src),
(VMOVUPSYrm addr:$src)>;
+ def : Pat<(loadv16bf16 addr:$src),
+ (VMOVUPSYrm addr:$src)>;
def : Pat<(alignedstore (v16f16 VR256:$src), addr:$dst),
(VMOVAPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(alignedstore (v16bf16 VR256:$src), addr:$dst),
+ (VMOVAPSYmr addr:$dst, VR256:$src)>;
def : Pat<(store (v16f16 VR256:$src), addr:$dst),
(VMOVUPSYmr addr:$dst, VR256:$src)>;
+ def : Pat<(store (v16bf16 VR256:$src), addr:$dst),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
// Use movaps / movups for SSE integer load / store (one byte shorter).
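Note: the new patterns map v8bf16/v16bf16 loads and stores onto the existing VMOVAPS/VMOVUPS instructions, the same trick the surrounding code already applies to f16 and integer vectors: the move instructions copy 128 or 256 bits without interpreting the element type. A minimal sketch of that idea at the intrinsics level (hypothetical helper name; it uses the integer move forms for brevity, whereas the patch prefers the ps encodings because they are one byte shorter):

#include <immintrin.h>
#include <cstdint>

// Copy eight bf16 lanes as raw 128-bit data; the vector move does not care
// that the payload is bf16 rather than f32 or i16.
void copy_v8bf16(const std::uint16_t src[8], std::uint16_t dst[8]) {
  __m128i v = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src));
  _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), v);
}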
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86PfmCounters.td b/contrib/llvm-project/llvm/lib/Target/X86/X86PfmCounters.td
index d2460e12b005..49ef6efc6aec 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86PfmCounters.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86PfmCounters.td
@@ -290,4 +290,17 @@ def ZnVer3PfmCounters : ProcPfmCounters {
];
}
def : PfmCountersBinding<"znver3", ZnVer3PfmCounters>;
-def : PfmCountersBinding<"znver4", ZnVer3PfmCounters>;
+
+def ZnVer4PfmCounters : ProcPfmCounters {
+ let CycleCounter = PfmCounter<"cycles_not_in_halt">;
+ let UopsCounter = PfmCounter<"retired_ops">;
+ let IssueCounters = [
+ PfmIssueCounter<"Zn4Int", "ops_type_dispatched_from_decoder:int_disp_retire_mode">,
+ PfmIssueCounter<"Zn4FPU", "ops_type_dispatched_from_decoder:fp_disp_retire_mode">,
+ PfmIssueCounter<"Zn4Load", "ls_dispatch:ld_dispatch">,
+ PfmIssueCounter<"Zn4Store", "ls_dispatch:store_dispatch">,
+ PfmIssueCounter<"Zn4Divider", "div_op_count">,
+ PfmIssueCounter<"Zn4AGU", "ls_dispatch:ld_st_dispatch + ls_dispatch:ld_dispatch + ls_dispatch:store_dispatch">
+ ];
+}
+def : PfmCountersBinding<"znver4", ZnVer4PfmCounters>;
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver4.td b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver4.td
new file mode 100644
index 000000000000..c3f08998419f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ScheduleZnver4.td
@@ -0,0 +1,1957 @@
+//=- X86ScheduleZnver4.td - X86 Znver4 Scheduling ------------*- tablegen -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the machine model for Znver4 to support instruction
+// scheduling and other instruction cost heuristics.
+// Based on:
+// * AMD Software Optimization Guide for AMD Family 19h Processors.
+// https://www.amd.com/system/files/TechDocs/56665.zip
+//===----------------------------------------------------------------------===//
+
+def Znver4Model : SchedMachineModel {
+ // AMD SOG 19h, 2.9.6 Dispatch
+ // The processor may dispatch up to 6 macro ops per cycle
+ // into the execution engine.
+ let IssueWidth = 6;
+ // AMD SOG 19h, 2.10.3
+ // The retire control unit (RCU) tracks the completion status of all
+ // outstanding operations (integer, load/store, and floating-point) and is
+ // the final arbiter for exception processing and recovery.
+ // The unit can receive up to 6 macro ops dispatched per cycle and track up
+ // to 320 macro ops in-flight in non-SMT mode or 160 per thread in SMT mode.
+ let MicroOpBufferSize = 320;
+ // AMD SOG 19h, 2.9.1 Op Cache
+ // The op cache is organized as an associative cache with 64 sets and 8 ways.
+ // At each set-way intersection is an entry containing up to 8 macro ops.
+ // The maximum capacity of the op cache is 4K ops.
+ // Agner, 22.5 µop cache
+ // The size of the µop cache is big enough for holding most critical loops.
+ // FIXME: PR50584: MachineScheduler/PostRAScheduler have quadratic complexity;
+ // with large values here the compilation of certain loops
+ // ends up taking way too long.
+ // Ideally for znver4, we should have 6.75K. However, we don't set that,
+ // considering the impact on compile time, and prefer using the default
+ // value instead.
+ // let LoopMicroOpBufferSize = 6750;
+ // AMD SOG 19h, 2.6.2 L1 Data Cache
+ // The L1 data cache has a 4- or 5-cycle integer load-to-use latency.
+ // AMD SOG 19h, 2.12 L1 Data Cache
+ // The AGU and LS pipelines are optimized for simple address generation modes.
+ // <...> and can achieve 4-cycle load-to-use integer load latency.
+ let LoadLatency = 4;
+ // AMD SOG 19h, 2.12 L1 Data Cache
+ // The AGU and LS pipelines are optimized for simple address generation modes.
+ // <...> and can achieve <...> 7-cycle load-to-use FP load latency.
+ int VecLoadLatency = 7;
+ // Latency of a simple store operation.
+ int StoreLatency = 1;
+ // FIXME:
+ let HighLatency = 25; // FIXME: any better choice?
+ // AMD SOG 19h, 2.8 Optimizing Branching
+ // The branch misprediction penalty is in the range from 11 to 18 cycles,
+ // <...>. The common case penalty is 13 cycles.
+ let MispredictPenalty = 13;
+
+ let PostRAScheduler = 1; // Enable Post RegAlloc Scheduler pass.
+
+ let CompleteModel = 1;
+}
+
+let SchedModel = Znver4Model in {
+
+
+//===----------------------------------------------------------------------===//
+// RCU
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.10.3 Retire Control Unit
+// The unit can receive up to 6 macro ops dispatched per cycle and track up to
+// 320 macro ops in-flight in non-SMT mode or 128 per thread in SMT mode. <...>
+// The retire unit handles in-order commit of up to nine macro ops per cycle.
+def Zn4RCU : RetireControlUnit<Znver4Model.MicroOpBufferSize, 9>;
+
+//===----------------------------------------------------------------------===//
+// Integer Execution Unit
+//
+
+// AMD SOG 19h, 2.4 Superscalar Organization
+// The processor uses four decoupled independent integer scheduler queues,
+// each one servicing one ALU pipeline and one or two other pipelines.
+
+//
+// Execution pipes
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.10.2 Execution Units
+// The processor contains 4 general purpose integer execution pipes.
+// Each pipe has an ALU capable of general purpose integer operations.
+def Zn4ALU0 : ProcResource<1>;
+def Zn4ALU1 : ProcResource<1>;
+def Zn4ALU2 : ProcResource<1>;
+def Zn4ALU3 : ProcResource<1>;
+
+// AMD SOG 19h, 2.10.2 Execution Units
+// There is also a separate branch execution unit.
+def Zn4BRU1 : ProcResource<1>;
+
+// AMD SOG 19h, 2.10.2 Execution Units
+// There are three Address Generation Units (AGUs) for all load and store
+// address generation. There are also 3 store data movement units
+// associated with the same schedulers as the AGUs.
+def Zn4AGU0 : ProcResource<1>;
+def Zn4AGU1 : ProcResource<1>;
+def Zn4AGU2 : ProcResource<1>;
+
+//
+// Execution Units
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.10.2 Execution Units
+// ALU0 additionally has divide <...> execution capability.
+defvar Zn4Divider = Zn4ALU0;
+
+// AMD SOG 19h, 2.10.2 Execution Units
+// ALU0 additionally has <...> branch execution capability.
+defvar Zn4BRU0 = Zn4ALU0;
+
+// Integer Multiplication issued on ALU1.
+defvar Zn4Multiplier = Zn4ALU1;
+
+// Execution pipeline grouping
+//===----------------------------------------------------------------------===//
+
+// General ALU operations
+def Zn4ALU0123 : ProcResGroup<[Zn4ALU0, Zn4ALU1, Zn4ALU2, Zn4ALU3]>;
+
+// General AGU operations
+def Zn4AGU012 : ProcResGroup<[Zn4AGU0, Zn4AGU1, Zn4AGU2]>;
+
+// Control flow: jumps, calls
+def Zn4BRU01 : ProcResGroup<[Zn4BRU0, Zn4BRU1]>;
+
+// Everything that isn't control flow, but still needs to access CC register,
+// namely: conditional moves, SETcc.
+def Zn4ALU03 : ProcResGroup<[Zn4ALU0, Zn4ALU3]>;
+
+// Zn4ALU1 handles complex bit twiddling: CRC/PDEP/PEXT
+
+// Simple bit twiddling: bit test, shift/rotate, bit extraction
+def Zn4ALU12 : ProcResGroup<[Zn4ALU1, Zn4ALU2]>;
+
+
+//
+// Scheduling
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.10.3 Retire Control Unit
+// The integer physical register file (PRF) consists of 224 registers.
+def Zn4IntegerPRF : RegisterFile<224, [GR64, CCR], [1, 1], [1, 0],
+ 6, // Max moves that can be eliminated per cycle.
+ 0>; // Restrict move elimination to zero regs.
+
+// AnandTech: the integer scheduler has a 4*24 entry macro op capacity.
+// AMD SOG 19h, 2.10.1 Schedulers
+// The schedulers can receive up to six macro ops per cycle, with a limit of
+// two per scheduler. Each scheduler can issue one micro op per cycle into
+// each of its associated pipelines.
+def Zn4Int : ProcResGroup<[Zn4ALU0, Zn4AGU0, Zn4BRU0, // scheduler 0
+ Zn4ALU1, Zn4AGU1, // scheduler 1
+ Zn4ALU2, Zn4AGU2, // scheduler 2
+ Zn4ALU3, Zn4BRU1 // scheduler 3
+ ]> {
+ let BufferSize = !mul(4, 24);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Floating-Point Unit
+//
+
+// AMD SOG 19h, 2.4 Superscalar Organization
+// The processor uses <...> two decoupled independent floating point schedulers
+// each servicing two FP pipelines and one store or FP-to-integer pipeline.
+
+//
+// Execution pipes
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.10.1 Schedulers
+// <...>, and six FPU pipes.
+// Agner, 22.10 Floating point execution pipes
+// There are six floating point/vector execution pipes,
+def Zn4FP0 : ProcResource<1>;
+def Zn4FP1 : ProcResource<1>;
+def Zn4FP2 : ProcResource<1>;
+def Zn4FP3 : ProcResource<1>;
+def Zn4FP45 : ProcResource<2>;
+
+//
+// Execution Units
+//===----------------------------------------------------------------------===//
+// AMD SOG 19h, 2.11.1 Floating Point Execution Resources
+
+// (v)FMUL*, (v)FMA*, Floating Point Compares, Blendv(DQ)
+defvar Zn4FPFMul0 = Zn4FP0;
+defvar Zn4FPFMul1 = Zn4FP1;
+
+// (v)FADD*
+defvar Zn4FPFAdd0 = Zn4FP2;
+defvar Zn4FPFAdd1 = Zn4FP3;
+
+// All convert operations except pack/unpack
+defvar Zn4FPFCvt0 = Zn4FP2;
+defvar Zn4FPFCvt1 = Zn4FP3;
+
+// All Divide and Square Root except Reciprocal Approximation
+// AMD SOG 19h, 2.11.1 Floating Point Execution Resources
+// FDIV unit can support 2 simultaneous operations in flight
+// even though it occupies a single pipe.
+// FIXME: BufferSize=2 ?
+defvar Zn4FPFDiv = Zn4FP1;
+
+// Moves and Logical operations on Floating Point Data Types
+defvar Zn4FPFMisc0 = Zn4FP0;
+defvar Zn4FPFMisc1 = Zn4FP1;
+defvar Zn4FPFMisc2 = Zn4FP2;
+defvar Zn4FPFMisc3 = Zn4FP3;
+
+// Integer Adds, Subtracts, and Compares
+// Some complex VADD operations are not available in all pipes.
+defvar Zn4FPVAdd0 = Zn4FP0;
+defvar Zn4FPVAdd1 = Zn4FP1;
+defvar Zn4FPVAdd2 = Zn4FP2;
+defvar Zn4FPVAdd3 = Zn4FP3;
+
+// Integer Multiplies, SAD, Blendvb
+defvar Zn4FPVMul0 = Zn4FP0;
+defvar Zn4FPVMul1 = Zn4FP3;
+
+// Data Shuffles, Packs, Unpacks, Permute
+// Some complex shuffle operations are only available in pipe1.
+defvar Zn4FPVShuf = Zn4FP1;
+defvar Zn4FPVShufAux = Zn4FP2;
+
+// Bit Shift Left/Right operations
+defvar Zn4FPVShift0 = Zn4FP1;
+defvar Zn4FPVShift1 = Zn4FP2;
+
+// Moves and Logical operations on Packed Integer Data Types
+defvar Zn4FPVMisc0 = Zn4FP0;
+defvar Zn4FPVMisc1 = Zn4FP1;
+defvar Zn4FPVMisc2 = Zn4FP2;
+defvar Zn4FPVMisc3 = Zn4FP3;
+
+// *AES*
+defvar Zn4FPAES0 = Zn4FP0;
+defvar Zn4FPAES1 = Zn4FP1;
+
+// *CLM*
+defvar Zn4FPCLM0 = Zn4FP0;
+defvar Zn4FPCLM1 = Zn4FP1;
+
+// Execution pipeline grouping
+//===----------------------------------------------------------------------===//
+
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// Stores and floating point to general purpose register transfer
+// have 2 dedicated pipelines (pipes 5 and 6).
+def Zn4FPU0123 : ProcResGroup<[Zn4FP0, Zn4FP1, Zn4FP2, Zn4FP3]>;
+
+// (v)FMUL*, (v)FMA*, Floating Point Compares, Blendv(DQ)
+def Zn4FPFMul01 : ProcResGroup<[Zn4FPFMul0, Zn4FPFMul1]>;
+
+// (v)FADD*
+// Some complex VADD operations are not available in all pipes.
+def Zn4FPFAdd01 : ProcResGroup<[Zn4FPFAdd0, Zn4FPFAdd1]>;
+
+// All convert operations except pack/unpack
+def Zn4FPFCvt01 : ProcResGroup<[Zn4FPFCvt0, Zn4FPFCvt1]>;
+
+// All Divide and Square Root except Reciprocal Approximation
+// def Zn4FPFDiv : ProcResGroup<[Zn4FPFDiv]>;
+
+// Moves and Logical operations on Floating Point Data Types
+def Zn4FPFMisc0123 : ProcResGroup<[Zn4FPFMisc0, Zn4FPFMisc1, Zn4FPFMisc2, Zn4FPFMisc3]>;
+
+// FIXUP and RANGE use FP01 pipelines
+def Zn4FPFMisc01 : ProcResGroup<[Zn4FPFMisc0, Zn4FPFMisc1]>;
+def Zn4FPFMisc12 : ProcResGroup<[Zn4FPFMisc1, Zn4FPFMisc2]>;
+// SCALE instructions use FP23 pipelines
+def Zn4FPFMisc23 : ProcResGroup<[Zn4FPFMisc2, Zn4FPFMisc3]>;
+def Zn4FPFMisc123 : ProcResGroup<[Zn4FPFMisc1,Zn4FPFMisc2, Zn4FPFMisc3]>;
+
+// Loads, Stores and Move to General Register (EX) Operations
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// Stores and floating point to general purpose register transfer
+// have 2 dedicated pipelines (pipes 5 and 6).
+defvar Zn4FPLd01 = Zn4FP45;
+
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// Note that FP stores are supported on two pipelines,
+// but throughput is limited to one per cycle.
+let Super = Zn4FP45 in
+def Zn4FPSt : ProcResource<1>;
+
+// Integer Adds, Subtracts, and Compares
+// Some complex VADD operations are not available in all pipes.
+def Zn4FPVAdd0123 : ProcResGroup<[Zn4FPVAdd0, Zn4FPVAdd1, Zn4FPVAdd2, Zn4FPVAdd3]>;
+
+def Zn4FPVAdd01: ProcResGroup<[Zn4FPVAdd0, Zn4FPVAdd1]>;
+def Zn4FPVAdd12: ProcResGroup<[Zn4FPVAdd1, Zn4FPVAdd2]>;
+
+// AVX512 Opmask pipelines
+def Zn4FPOpMask01: ProcResGroup<[Zn4FP2, Zn4FP3]>;
+def Zn4FPOpMask4: ProcResGroup<[Zn4FP45]>;
+
+// Integer Multiplies, SAD, Blendvb
+def Zn4FPVMul01 : ProcResGroup<[Zn4FPVMul0, Zn4FPVMul1]>;
+
+// Data Shuffles, Packs, Unpacks, Permute
+// Some complex shuffle operations are only available in pipe1.
+def Zn4FPVShuf01 : ProcResGroup<[Zn4FPVShuf, Zn4FPVShufAux]>;
+
+// Bit Shift Left/Right operations
+def Zn4FPVShift01 : ProcResGroup<[Zn4FPVShift0, Zn4FPVShift1]>;
+
+// Moves and Logical operations on Packed Integer Data Types
+def Zn4FPVMisc0123 : ProcResGroup<[Zn4FPVMisc0, Zn4FPVMisc1, Zn4FPVMisc2, Zn4FPVMisc3]>;
+
+// *AES*
+def Zn4FPAES01 : ProcResGroup<[Zn4FPAES0, Zn4FPAES1]>;
+
+// *CLM*
+def Zn4FPCLM01 : ProcResGroup<[Zn4FPCLM0, Zn4FPCLM1]>;
+
+
+//
+// Scheduling
+//===----------------------------------------------------------------------===//
+
+// Agner, 21.8 Register renaming and out-of-order schedulers
+// The floating point register file has 192 vector registers
+// of 512b each in zen4.
+def Zn4FpPRF : RegisterFile<192, [VR64, VR128, VR256, VR512], [1, 1, 1, 1], [0, 1, 1],
+ 6, // Max moves that can be eliminated per cycle.
+ 0>; // Restrict move elimination to zero regs.
+
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// The floating-point scheduler has a 2*32 entry macro op capacity.
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// <...> the scheduler can issue 1 micro op per cycle for each pipe.
+// FIXME: those are two separate schedulers, not a single big one.
+def Zn4FP : ProcResGroup<[Zn4FP0, Zn4FP2, /*Zn4FP4,*/ // scheduler 0
+ Zn4FP1, Zn4FP3, Zn4FP45 /*Zn4FP5*/ // scheduler 1
+ ]> {
+ let BufferSize = !mul(2, 32);
+}
+
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// Macro ops can be dispatched to the 64 entry Non Scheduling Queue (NSQ)
+// even if floating-point scheduler is full.
+// FIXME: how to model this properly?
+
+
+//===----------------------------------------------------------------------===//
+// Load-Store Unit
+//
+
+// AMD SOG 19h, 2.12 Load-Store Unit
+// The LS unit contains three largely independent pipelines,
+// enabling the execution of three 256-bit memory operations per cycle.
+def Zn4LSU : ProcResource<3>;
+
+// AMD SOG 19h, 2.12 Load-Store Unit
+// All three memory operations can be loads.
+let Super = Zn4LSU in
+def Zn4Load : ProcResource<3> {
+ // AMD SOG 19h, 2.12 Load-Store Unit
+ // The LS unit can process up to 72 out-of-order loads.
+ let BufferSize = 72;
+}
+
+def Zn4LoadQueue : LoadQueue<Zn4Load>;
+
+// AMD SOG 19h, 2.12 Load-Store Unit
+// A maximum of two of the memory operations can be stores.
+let Super = Zn4LSU in
+def Zn4Store : ProcResource<2> {
+ // AMD SOG 19h, 2.12 Load-Store Unit
+ // The LS unit utilizes a 64-entry store queue (STQ).
+ let BufferSize = 64;
+}
+
+def Zn4StoreQueue : StoreQueue<Zn4Store>;
+
+//===----------------------------------------------------------------------===//
+// Basic helper classes.
+//===----------------------------------------------------------------------===//
+
+// Many SchedWrites are defined in pairs with and without a folded load.
+// Instructions with folded loads are usually micro-fused, so they only appear
+// as two micro-ops when dispatched by the schedulers.
+// This multiclass defines the resource usage for variants with and without
+// folded loads.
+
+multiclass __Zn4WriteRes<SchedWrite SchedRW, list<ProcResourceKind> ExePorts,
+ int Lat = 1, list<int> Res = [], int UOps = 1> {
+ def : WriteRes<SchedRW, ExePorts> {
+ let Latency = Lat;
+ let ResourceCycles = Res;
+ let NumMicroOps = UOps;
+ }
+}
+
+multiclass __Zn4WriteResPair<X86FoldableSchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat,
+ list<int> Res, int UOps, int LoadLat, int LoadUOps,
+ ProcResourceKind AGU, int LoadRes> {
+ defm : __Zn4WriteRes<SchedRW, ExePorts, Lat, Res, UOps>;
+
+ defm : __Zn4WriteRes<SchedRW.Folded,
+ !listconcat([AGU, Zn4Load], ExePorts),
+ !add(Lat, LoadLat),
+ !if(!and(!empty(Res), !eq(LoadRes, 1)),
+ [],
+ !listconcat([1, LoadRes],
+ !if(!empty(Res),
+ !listsplat(1, !size(ExePorts)),
+ Res))),
+ !add(UOps, LoadUOps)>;
+}
+
+// For classes without folded loads.
+multiclass Zn4WriteResInt<SchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1> {
+ defm : __Zn4WriteRes<SchedRW, ExePorts, Lat, Res, UOps>;
+}
+
+multiclass Zn4WriteResXMM<SchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1> {
+ defm : __Zn4WriteRes<SchedRW, ExePorts, Lat, Res, UOps>;
+}
+
+multiclass Zn4WriteResYMM<SchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1> {
+ defm : __Zn4WriteRes<SchedRW, ExePorts, Lat, Res, UOps>;
+}
+
+multiclass Zn4WriteResZMM<SchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1> {
+ defm : __Zn4WriteRes<SchedRW, ExePorts, Lat, Res, UOps>;
+}
+
+// For classes with folded loads.
+multiclass Zn4WriteResIntPair<X86FoldableSchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1,
+ int LoadUOps = 0, int LoadRes = 1> {
+ defm : __Zn4WriteResPair<SchedRW, ExePorts, Lat, Res, UOps,
+ Znver4Model.LoadLatency,
+ LoadUOps, Zn4AGU012, LoadRes>;
+}
+
+multiclass Zn4WriteResXMMPair<X86FoldableSchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1,
+ int LoadUOps = 0, int LoadRes = 1> {
+ defm : __Zn4WriteResPair<SchedRW, ExePorts, Lat, Res, UOps,
+ Znver4Model.VecLoadLatency,
+ LoadUOps, Zn4FPLd01, LoadRes>;
+}
+
+multiclass Zn4WriteResYMMPair<X86FoldableSchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 1,
+ int LoadUOps = 0, int LoadRes = 1> {
+ defm : __Zn4WriteResPair<SchedRW, ExePorts, Lat, Res, UOps,
+ Znver4Model.VecLoadLatency,
+ LoadUOps, Zn4FPLd01, LoadRes>;
+}
+
+multiclass Zn4WriteResZMMPair<X86FoldableSchedWrite SchedRW,
+ list<ProcResourceKind> ExePorts, int Lat = 1,
+ list<int> Res = [], int UOps = 2,
+ int LoadUOps = 0, int LoadRes = 1> {
+ defm : __Zn4WriteResPair<SchedRW, ExePorts, Lat, Res, UOps,
+ Znver4Model.VecLoadLatency,
+ LoadUOps, Zn4FPLd01, LoadRes>;
+}
+
+//===----------------------------------------------------------------------===//
+// Here be dragons.
+//===----------------------------------------------------------------------===//
+
+def : ReadAdvance<ReadAfterLd, Znver4Model.LoadLatency>;
+
+def : ReadAdvance<ReadAfterVecLd, Znver4Model.VecLoadLatency>;
+def : ReadAdvance<ReadAfterVecXLd, Znver4Model.VecLoadLatency>;
+def : ReadAdvance<ReadAfterVecYLd, Znver4Model.VecLoadLatency>;
+
+// AMD SOG 19h, 2.11 Floating-Point Unit
+// There is 1 cycle of added latency for a result to cross
+// from F to I or I to F domain.
+def : ReadAdvance<ReadInt2Fpu, -1>;
+
+// Instructions with both a load and a store folded are modeled as a folded
+// load + WriteRMW.
+defm : Zn4WriteResInt<WriteRMW, [Zn4AGU012, Zn4Store], Znver4Model.StoreLatency, [1, 1], 0>;
+
+// Loads, stores, and moves, not folded with other operations.
+defm : Zn4WriteResInt<WriteLoad, [Zn4AGU012, Zn4Load], !add(Znver4Model.LoadLatency, 1), [1, 1], 1>;
+
+// Model the effect of clobbering the read-write mask operand of the GATHER operation.
+// Does not cost anything by itself, only has latency, matching that of the WriteLoad.
+defm : Zn4WriteResInt<WriteVecMaskedGatherWriteback, [], !add(Znver4Model.LoadLatency, 1), [], 0>;
+
+def Zn4WriteMOVSlow : SchedWriteRes<[Zn4AGU012, Zn4Load]> {
+ let Latency = !add(Znver4Model.LoadLatency, 1);
+ let ResourceCycles = [3, 1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteMOVSlow], (instrs MOV8rm, MOV8rm_NOREX, MOV16rm, MOVSX16rm16, MOVSX16rm32, MOVZX16rm16, MOVSX16rm8, MOVZX16rm8)>;
+
+defm : Zn4WriteResInt<WriteStore, [Zn4AGU012, Zn4Store], Znver4Model.StoreLatency, [1, 2], 1>;
+defm : Zn4WriteResInt<WriteStoreNT, [Zn4AGU012, Zn4Store], Znver4Model.StoreLatency, [1, 2], 1>;
+defm : Zn4WriteResInt<WriteMove, [Zn4ALU0123], 1, [4], 1>;
+
+// Treat misc copies as a move.
+def : InstRW<[WriteMove], (instrs COPY)>;
+
+def Zn4WriteMOVBE16rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> {
+ let Latency = Znver4Model.LoadLatency;
+ let ResourceCycles = [1, 1, 4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteMOVBE16rm], (instrs MOVBE16rm)>;
+
+def Zn4WriteMOVBEmr : SchedWriteRes<[Zn4ALU0123, Zn4AGU012, Zn4Store]> {
+ let Latency = Znver4Model.StoreLatency;
+ let ResourceCycles = [4, 1, 1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteMOVBEmr], (instrs MOVBE16mr, MOVBE32mr, MOVBE64mr)>;
+
+// Arithmetic.
+defm : Zn4WriteResIntPair<WriteALU, [Zn4ALU0123], 1, [1], 1>; // Simple integer ALU op.
+
+def Zn4WriteALUSlow : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteALUSlow], (instrs ADD8i8, ADD16i16, ADD32i32, ADD64i32,
+ AND8i8, AND16i16, AND32i32, AND64i32,
+ OR8i8, OR16i16, OR32i32, OR64i32,
+ SUB8i8, SUB16i16, SUB32i32, SUB64i32,
+ XOR8i8, XOR16i16, XOR32i32, XOR64i32)>;
+
+def Zn4WriteMoveExtend : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteMoveExtend], (instrs MOVSX16rr16, MOVSX16rr32, MOVZX16rr16, MOVSX16rr8, MOVZX16rr8)>;
+
+def Zn4WriteMaterialize32bitImm: SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteMaterialize32bitImm], (instrs MOV32ri, MOV32ri_alt, MOV64ri32)>;
+
+def Zn4WritePDEP_PEXT : SchedWriteRes<[Zn4ALU1]> {
+ let Latency = 3;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WritePDEP_PEXT], (instrs PDEP32rr, PDEP64rr,
+ PEXT32rr, PEXT64rr)>;
+
+defm : Zn4WriteResIntPair<WriteADC, [Zn4ALU0123], 1, [4], 1>; // Integer ALU + flags op.
+
+def Zn4WriteADC8mr_SBB8mr : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123, Zn4Store]> {
+ let Latency = 1;
+ let ResourceCycles = [1, 1, 7, 1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteADC8mr_SBB8mr], (instrs ADC8mr, SBB8mr)>;
+
+// This is for simple LEAs with one or two input operands.
+defm : Zn4WriteResInt<WriteLEA, [Zn4AGU012], 1, [1], 1>; // LEA instructions can't fold loads.
+
+// This write is used for slow LEA instructions.
+def Zn4Write3OpsLEA : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 2;
+ let ResourceCycles = [1];
+ let NumMicroOps = 2;
+}
+
+// On Znver4, a slow LEA is either a 3-operand LEA (base, index, offset),
+// or an LEA with a `Scale` value different from 1.
+def Zn4SlowLEAPredicate : MCSchedPredicate<
+ CheckAny<[
+ // A 3-operand LEA (base, index, offset).
+ IsThreeOperandsLEAFn,
+ // An LEA with a "Scale" different than 1.
+ CheckAll<[
+ CheckIsImmOperand<2>,
+ CheckNot<CheckImmOperand<2, 1>>
+ ]>
+ ]>
+>;
+
+def Zn4WriteLEA : SchedWriteVariant<[
+ SchedVar<Zn4SlowLEAPredicate, [Zn4Write3OpsLEA]>,
+ SchedVar<NoSchedPred, [WriteLEA]>
+]>;
+
+def : InstRW<[Zn4WriteLEA], (instrs LEA32r, LEA64r, LEA64_32r)>;
+
+def Zn4SlowLEA16r : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 2; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [4];
+ let NumMicroOps = 2;
+}
+
+def : InstRW<[Zn4SlowLEA16r], (instrs LEA16r)>;
+
+// Integer multiplication
+defm : Zn4WriteResIntPair<WriteIMul8, [Zn4Multiplier], 3, [3], 1>; // Integer 8-bit multiplication.
+defm : Zn4WriteResIntPair<WriteIMul16, [Zn4Multiplier], 3, [3], 3, /*LoadUOps=*/1>; // Integer 16-bit multiplication.
+defm : Zn4WriteResIntPair<WriteIMul16Imm, [Zn4Multiplier], 4, [4], 2>; // Integer 16-bit multiplication by immediate.
+defm : Zn4WriteResIntPair<WriteIMul16Reg, [Zn4Multiplier], 3, [1], 1>; // Integer 16-bit multiplication by register.
+defm : Zn4WriteResIntPair<WriteIMul32, [Zn4Multiplier], 3, [3], 2>; // Integer 32-bit multiplication.
+defm : Zn4WriteResIntPair<WriteMULX32, [Zn4Multiplier], 3, [1], 2>; // Integer 32-bit Unsigned Multiply Without Affecting Flags.
+defm : Zn4WriteResIntPair<WriteIMul32Imm, [Zn4Multiplier], 3, [1], 1>; // Integer 32-bit multiplication by immediate.
+defm : Zn4WriteResIntPair<WriteIMul32Reg, [Zn4Multiplier], 3, [1], 1>; // Integer 32-bit multiplication by register.
+defm : Zn4WriteResIntPair<WriteIMul64, [Zn4Multiplier], 3, [3], 2>; // Integer 64-bit multiplication.
+defm : Zn4WriteResIntPair<WriteMULX64, [Zn4Multiplier], 3, [1], 2>; // Integer 32-bit Unsigned Multiply Without Affecting Flags.
+defm : Zn4WriteResIntPair<WriteIMul64Imm, [Zn4Multiplier], 3, [1], 1>; // Integer 64-bit multiplication by immediate.
+defm : Zn4WriteResIntPair<WriteIMul64Reg, [Zn4Multiplier], 3, [1], 1>; // Integer 64-bit multiplication by register.
+defm : Zn4WriteResInt<WriteIMulHLd, [], !add(4, Znver4Model.LoadLatency), [], 0>; // Integer multiplication, high part.
+defm : Zn4WriteResInt<WriteIMulH, [], 4, [], 0>; // Integer multiplication, high part.
+
+defm : Zn4WriteResInt<WriteBSWAP32, [Zn4ALU0123], 1, [1], 1>; // Byte Order (Endianness) 32-bit Swap.
+defm : Zn4WriteResInt<WriteBSWAP64, [Zn4ALU0123], 1, [1], 1>; // Byte Order (Endianness) 64-bit Swap.
+
+defm : Zn4WriteResIntPair<WriteCMPXCHG, [Zn4ALU0123], 3, [12], 5>; // Compare and set, compare and swap.
+
+def Zn4WriteCMPXCHG8rr : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 3;
+ let ResourceCycles = [12];
+ let NumMicroOps = 3;
+}
+def : InstRW<[Zn4WriteCMPXCHG8rr], (instrs CMPXCHG8rr)>;
+
+defm : Zn4WriteResInt<WriteCMPXCHGRMW, [Zn4ALU0123], 3, [12], 6>; // Compare and set, compare and swap.
+
+def Zn4WriteCMPXCHG8rm_LCMPXCHG8 : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteCMPXCHG8rr.Latency);
+ let ResourceCycles = [1, 1, 12];
+ let NumMicroOps = !add(Zn4WriteCMPXCHG8rr.NumMicroOps, 2);
+}
+def : InstRW<[Zn4WriteCMPXCHG8rm_LCMPXCHG8], (instrs CMPXCHG8rm, LCMPXCHG8)>;
+
+def Zn4WriteCMPXCHG8B : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 3; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [24];
+ let NumMicroOps = 19;
+}
+def : InstRW<[Zn4WriteCMPXCHG8B], (instrs CMPXCHG8B)>;
+
+def Zn4WriteCMPXCHG16B_LCMPXCHG16B : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 4; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [59];
+ let NumMicroOps = 28;
+}
+def : InstRW<[Zn4WriteCMPXCHG16B_LCMPXCHG16B], (instrs CMPXCHG16B, LCMPXCHG16B)>;
+
+def Zn4WriteWriteXCHGUnrenameable : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [2];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteWriteXCHGUnrenameable], (instrs XCHG8rr, XCHG16rr, XCHG16ar)>;
+
+def Zn4WriteXCHG8rm_XCHG16rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, 3); // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = 5;
+}
+def : InstRW<[Zn4WriteXCHG8rm_XCHG16rm], (instrs XCHG8rm, XCHG16rm)>;
+
+def Zn4WriteXCHG32rm_XCHG64rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, 2); // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteXCHG32rm_XCHG64rm], (instrs XCHG32rm, XCHG64rm)>;
+
+// Integer division.
+// FIXME: uops for 8-bit division measure as 2; for others it's a guess.
+// FIXME: latency for 8-bit division measures as 10; for others it's a guess.
+defm : Zn4WriteResIntPair<WriteDiv8, [Zn4Divider], 10, [10], 2>;
+defm : Zn4WriteResIntPair<WriteDiv16, [Zn4Divider], 11, [11], 2>;
+defm : Zn4WriteResIntPair<WriteDiv32, [Zn4Divider], 13, [13], 2>;
+defm : Zn4WriteResIntPair<WriteDiv64, [Zn4Divider], 17, [17], 2>;
+defm : Zn4WriteResIntPair<WriteIDiv8, [Zn4Divider], 10, [10], 2>;
+defm : Zn4WriteResIntPair<WriteIDiv16, [Zn4Divider], 11, [11], 2>;
+defm : Zn4WriteResIntPair<WriteIDiv32, [Zn4Divider], 13, [13], 2>;
+defm : Zn4WriteResIntPair<WriteIDiv64, [Zn4Divider], 17, [17], 2>;
+
+defm : Zn4WriteResIntPair<WriteBSF, [Zn4ALU1], 1, [1], 6, /*LoadUOps=*/1>; // Bit scan forward.
+defm : Zn4WriteResIntPair<WriteBSR, [Zn4ALU1], 1, [1], 6, /*LoadUOps=*/1>; // Bit scan reverse.
+
+defm : Zn4WriteResIntPair<WritePOPCNT, [Zn4ALU0123], 1, [1], 1>; // Bit population count.
+
+def Zn4WritePOPCNT16rr : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WritePOPCNT16rr], (instrs POPCNT16rr)>;
+
+defm : Zn4WriteResIntPair<WriteLZCNT, [Zn4ALU0123], 1, [1], 1>; // Leading zero count.
+
+def Zn4WriteLZCNT16rr : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteLZCNT16rr], (instrs LZCNT16rr)>;
+
+defm : Zn4WriteResIntPair<WriteTZCNT, [Zn4ALU12], 2, [1], 2>; // Trailing zero count.
+
+def Zn4WriteTZCNT16rr : SchedWriteRes<[Zn4ALU0123]> {
+ let Latency = 2;
+ let ResourceCycles = [4];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteTZCNT16rr], (instrs TZCNT16rr)>;
+
+defm : Zn4WriteResIntPair<WriteCMOV, [Zn4ALU03], 1, [1], 1>; // Conditional move.
+defm : Zn4WriteResInt<WriteFCMOV, [Zn4ALU0123], 7, [28], 7>; // FIXME: not from llvm-exegesis // X87 conditional move.
+defm : Zn4WriteResInt<WriteSETCC, [Zn4ALU03], 1, [2], 1>; // Set register based on condition code.
+defm : Zn4WriteResInt<WriteSETCCStore, [Zn4ALU03, Zn4AGU012, Zn4Store], 2, [2, 1, 1], 2>; // FIXME: latency not from llvm-exegesis
+defm : Zn4WriteResInt<WriteLAHFSAHF, [Zn4ALU3], 1, [1], 1>; // Load/Store flags in AH.
+
+defm : Zn4WriteResInt<WriteBitTest, [Zn4ALU12], 1, [1], 1>; // Bit Test
+defm : Zn4WriteResInt<WriteBitTestImmLd, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 1), [1, 1, 1], 2>;
+defm : Zn4WriteResInt<WriteBitTestRegLd, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 1), [1, 1, 1], 7>;
+
+defm : Zn4WriteResInt<WriteBitTestSet, [Zn4ALU12], 2, [2], 2>; // Bit Test + Set
+defm : Zn4WriteResInt<WriteBitTestSetImmLd, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 2), [1, 1, 1], 4>;
+defm : Zn4WriteResInt<WriteBitTestSetRegLd, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 2), [1, 1, 1], 9>;
+
+// Integer shifts and rotates.
+defm : Zn4WriteResIntPair<WriteShift, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+defm : Zn4WriteResIntPair<WriteShiftCL, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+defm : Zn4WriteResIntPair<WriteRotate, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+
+def Zn4WriteRotateR1 : SchedWriteRes<[Zn4ALU12]> {
+ let Latency = 1;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteRotateR1], (instrs RCL8r1, RCL16r1, RCL32r1, RCL64r1,
+ RCR8r1, RCR16r1, RCR32r1, RCR64r1)>;
+
+def Zn4WriteRotateM1 : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU12]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteRotateR1.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteRotateR1.NumMicroOps, 1);
+}
+def : InstRW<[Zn4WriteRotateM1], (instrs RCL8m1, RCL16m1, RCL32m1, RCL64m1,
+ RCR8m1, RCR16m1, RCR32m1, RCR64m1)>;
+
+def Zn4WriteRotateRightRI : SchedWriteRes<[Zn4ALU12]> {
+ let Latency = 3;
+ let ResourceCycles = [6];
+ let NumMicroOps = 7;
+}
+def : InstRW<[Zn4WriteRotateRightRI], (instrs RCR8ri, RCR16ri, RCR32ri, RCR64ri)>;
+
+def Zn4WriteRotateRightMI : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU12]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteRotateRightRI.Latency);
+ let ResourceCycles = [1, 1, 8];
+ let NumMicroOps = !add(Zn4WriteRotateRightRI.NumMicroOps, 3);
+}
+def : InstRW<[Zn4WriteRotateRightMI], (instrs RCR8mi, RCR16mi, RCR32mi, RCR64mi)>;
+
+def Zn4WriteRotateLeftRI : SchedWriteRes<[Zn4ALU12]> {
+ let Latency = 4;
+ let ResourceCycles = [8];
+ let NumMicroOps = 9;
+}
+def : InstRW<[Zn4WriteRotateLeftRI], (instrs RCL8ri, RCL16ri, RCL32ri, RCL64ri)>;
+
+def Zn4WriteRotateLeftMI : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU12]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteRotateLeftRI.Latency);
+ let ResourceCycles = [1, 1, 8];
+ let NumMicroOps = !add(Zn4WriteRotateLeftRI.NumMicroOps, 2);
+}
+def : InstRW<[Zn4WriteRotateLeftMI], (instrs RCL8mi, RCL16mi, RCL32mi, RCL64mi)>;
+
+defm : Zn4WriteResIntPair<WriteRotateCL, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+
+def Zn4WriteRotateRightRCL : SchedWriteRes<[Zn4ALU12]> {
+ let Latency = 3;
+ let ResourceCycles = [6];
+ let NumMicroOps = 7;
+}
+def : InstRW<[Zn4WriteRotateRightRCL], (instrs RCR8rCL, RCR16rCL, RCR32rCL, RCR64rCL)>;
+
+def Zn4WriteRotateRightMCL : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU12]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteRotateRightRCL.Latency);
+ let ResourceCycles = [1, 1, 8];
+ let NumMicroOps = !add(Zn4WriteRotateRightRCL.NumMicroOps, 2);
+}
+def : InstRW<[Zn4WriteRotateRightMCL], (instrs RCR8mCL, RCR16mCL, RCR32mCL, RCR64mCL)>;
+
+def Zn4WriteRotateLeftRCL : SchedWriteRes<[Zn4ALU12]> {
+ let Latency = 4;
+ let ResourceCycles = [8];
+ let NumMicroOps = 9;
+}
+def : InstRW<[Zn4WriteRotateLeftRCL], (instrs RCL8rCL, RCL16rCL, RCL32rCL, RCL64rCL)>;
+
+def Zn4WriteRotateLeftMCL : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4ALU12]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteRotateLeftRCL.Latency);
+ let ResourceCycles = [1, 1, 8];
+ let NumMicroOps = !add(Zn4WriteRotateLeftRCL.NumMicroOps, 2);
+}
+def : InstRW<[Zn4WriteRotateLeftMCL], (instrs RCL8mCL, RCL16mCL, RCL32mCL, RCL64mCL)>;
+
+// Double shift instructions.
+defm : Zn4WriteResInt<WriteSHDrri, [Zn4ALU12], 2, [3], 4>;
+defm : Zn4WriteResInt<WriteSHDrrcl, [Zn4ALU12], 2, [3], 5>;
+defm : Zn4WriteResInt<WriteSHDmri, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 2), [1, 1, 4], 6>;
+defm : Zn4WriteResInt<WriteSHDmrcl, [Zn4AGU012, Zn4Load, Zn4ALU12], !add(Znver4Model.LoadLatency, 2), [1, 1, 4], 6>;
+
+// BMI1 BEXTR/BLS, BMI2 BZHI
+defm : Zn4WriteResIntPair<WriteBEXTR, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+defm : Zn4WriteResIntPair<WriteBLS, [Zn4ALU0123], 1, [1], 1, /*LoadUOps=*/1>;
+defm : Zn4WriteResIntPair<WriteBZHI, [Zn4ALU12], 1, [1], 1, /*LoadUOps=*/1>;
+
+// Idioms that clear a register, like xorps %xmm0, %xmm0.
+// These can often bypass execution ports completely.
+defm : Zn4WriteResInt<WriteZero, [Zn4ALU0123], 0, [0], 1>;
+
+// Branches don't produce values, so they have no latency, but they still
+// consume resources. Indirect branches can fold loads.
+defm : Zn4WriteResIntPair<WriteJump, [Zn4BRU01], 1, [1], 1>; // FIXME: not from llvm-exegesis
+
+// Floating point. This covers both scalar and vector operations.
+defm : Zn4WriteResInt<WriteFLD0, [Zn4FPLd01, Zn4Load, Zn4FP1], !add(Znver4Model.LoadLatency, 4), [1, 1, 1], 1>;
+defm : Zn4WriteResInt<WriteFLD1, [Zn4FPLd01, Zn4Load, Zn4FP1], !add(Znver4Model.LoadLatency, 7), [1, 1, 1], 1>;
+defm : Zn4WriteResInt<WriteFLDC, [Zn4FPLd01, Zn4Load, Zn4FP1], !add(Znver4Model.LoadLatency, 7), [1, 1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFLoad, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFLoadX, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteFLoadY, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFMaskedLoad, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteFMaskedLoadY, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFStore, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+
+def Zn4WriteWriteFStoreMMX : SchedWriteRes<[Zn4FPSt, Zn4Store]> {
+ let Latency = 2; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1, 1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteWriteFStoreMMX], (instrs MOVHPDmr, MOVHPSmr,
+ VMOVHPDmr, VMOVHPSmr)>;
+
+defm : Zn4WriteResXMM<WriteFStoreX, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteFStoreY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFStoreNT, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteFStoreNTX, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteFStoreNTY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+
+defm : Zn4WriteResXMM<WriteFMaskedStore32, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [6, 1], 18>;
+defm : Zn4WriteResXMM<WriteFMaskedStore64, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [4, 1], 10>;
+defm : Zn4WriteResYMM<WriteFMaskedStore32Y, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [12, 1], 42>;
+defm : Zn4WriteResYMM<WriteFMaskedStore64Y, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [6, 1], 18>;
+
+defm : Zn4WriteResXMMPair<WriteFAdd, [Zn4FPFAdd01], 3, [1], 1>; // Floating point add/sub.
+
+def Zn4WriteX87Arith : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, 1); // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1, 1, 24];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteX87Arith], (instrs ADD_FI16m, ADD_FI32m,
+ SUB_FI16m, SUB_FI32m,
+ SUBR_FI16m, SUBR_FI32m,
+ MUL_FI16m, MUL_FI32m)>;
+
+def Zn4WriteX87Div : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, 1); // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1, 1, 62];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteX87Div], (instrs DIV_FI16m, DIV_FI32m,
+ DIVR_FI16m, DIVR_FI32m)>;
+
+defm : Zn4WriteResXMMPair<WriteFAddX, [Zn4FPFAdd01], 3, [1], 1>; // Floating point add/sub (XMM).
+defm : Zn4WriteResYMMPair<WriteFAddY, [Zn4FPFAdd01], 3, [1], 1>; // Floating point add/sub (YMM).
+defm : Zn4WriteResZMMPair<WriteFAddZ, [Zn4FPFAdd01], 3, [2], 1>; // Floating point add/sub (ZMM).
+defm : Zn4WriteResXMMPair<WriteFAdd64, [Zn4FPFAdd01], 3, [1], 1>; // Floating point double add/sub.
+defm : Zn4WriteResXMMPair<WriteFAdd64X, [Zn4FPFAdd01], 3, [1], 1>; // Floating point double add/sub (XMM).
+defm : Zn4WriteResYMMPair<WriteFAdd64Y, [Zn4FPFAdd01], 3, [1], 1>; // Floating point double add/sub (YMM).
+defm : Zn4WriteResZMMPair<WriteFAdd64Z, [Zn4FPFAdd01], 3, [2], 1>; // Floating point double add/sub (ZMM).
+defm : Zn4WriteResXMMPair<WriteFCmp, [Zn4FPFMul01], 2, [2], 1>; // Floating point compare.
+defm : Zn4WriteResXMMPair<WriteFCmpX, [Zn4FPFMul01], 1, [1], 1>; // Floating point compare (XMM).
+defm : Zn4WriteResYMMPair<WriteFCmpY, [Zn4FPFMul01], 2, [2], 1>; // Floating point compare (YMM).
+defm : Zn4WriteResZMMPair<WriteFCmpZ, [Zn4FPFMul01], 2, [4], 1>; // Floating point compare (ZMM).
+defm : Zn4WriteResXMMPair<WriteFCmp64, [Zn4FPFMul01], 1, [1], 1>; // Floating point double compare.
+defm : Zn4WriteResXMMPair<WriteFCmp64X, [Zn4FPFMul01], 1, [1], 1>; // Floating point double compare (XMM).
+defm : Zn4WriteResYMMPair<WriteFCmp64Y, [Zn4FPFMul01], 2, [2], 1>; // Floating point double compare (YMM).
+defm : Zn4WriteResZMMPair<WriteFCmp64Z, [Zn4FPFMul01], 2, [4], 1>; // Floating point double compare (ZMM).
+defm : Zn4WriteResXMMPair<WriteFCom, [Zn4FPFMul01], 3, [2], 1>; // FIXME: latency not from llvm-exegesis // Floating point compare to flags (X87).
+defm : Zn4WriteResXMMPair<WriteFComX, [Zn4FPFMul01], 4, [2], 2>; // FIXME: latency not from llvm-exegesis // Floating point compare to flags (SSE).
+defm : Zn4WriteResXMMPair<WriteFMul, [Zn4FPFMul01], 3, [1], 1>; // Floating point multiplication.
+defm : Zn4WriteResXMMPair<WriteFMulX, [Zn4FPFMul01], 3, [1], 1>; // Floating point multiplication (XMM).
+defm : Zn4WriteResYMMPair<WriteFMulY, [Zn4FPFMul01], 3, [1], 1>; // Floating point multiplication (YMM).
+defm : Zn4WriteResZMMPair<WriteFMulZ, [Zn4FPFMul01], 3, [2], 1>; // Floating point multiplication (ZMM).
+defm : Zn4WriteResXMMPair<WriteFMul64, [Zn4FPFMul01], 3, [1], 1>; // Floating point double multiplication.
+defm : Zn4WriteResXMMPair<WriteFMul64X, [Zn4FPFMul01], 3, [1], 1>; // Floating point double multiplication (XMM).
+defm : Zn4WriteResYMMPair<WriteFMul64Y, [Zn4FPFMul01], 3, [1], 1>; // Floating point double multiplication (YMM).
+defm : Zn4WriteResZMMPair<WriteFMul64Z, [Zn4FPFMul01], 3, [2], 1>; // Floating point double multiplication (ZMM).
+defm : Zn4WriteResXMMPair<WriteFDiv, [Zn4FPFDiv], 11, [3], 1>; // Floating point division.
+defm : Zn4WriteResXMMPair<WriteFDivX, [Zn4FPFDiv], 11, [3], 1>; // Floating point division (XMM).
+defm : Zn4WriteResYMMPair<WriteFDivY, [Zn4FPFDiv], 11, [3], 1>; // Floating point division (YMM).
+defm : Zn4WriteResZMMPair<WriteFDivZ, [Zn4FPFDiv], 11, [6], 1>; // Floating point division (ZMM).
+defm : Zn4WriteResXMMPair<WriteFDiv64, [Zn4FPFDiv], 13, [5], 1>; // Floating point double division.
+defm : Zn4WriteResXMMPair<WriteFDiv64X, [Zn4FPFDiv], 13, [5], 1>; // Floating point double division (XMM).
+defm : Zn4WriteResYMMPair<WriteFDiv64Y, [Zn4FPFDiv], 13, [5], 1>; // Floating point double division (YMM).
+defm : Zn4WriteResZMMPair<WriteFDiv64Z, [Zn4FPFDiv], 13, [10], 1>; // Floating point double division (ZMM).
+defm : Zn4WriteResXMMPair<WriteFSqrt, [Zn4FPFDiv], 15, [5], 1>; // Floating point square root.
+defm : Zn4WriteResXMMPair<WriteFSqrtX, [Zn4FPFDiv], 15, [5], 1>; // Floating point square root (XMM).
+defm : Zn4WriteResYMMPair<WriteFSqrtY, [Zn4FPFDiv], 15, [5], 1>; // Floating point square root (YMM).
+defm : Zn4WriteResZMMPair<WriteFSqrtZ, [Zn4FPFDiv], 15, [10], 1>; // Floating point square root (ZMM).
+defm : Zn4WriteResXMMPair<WriteFSqrt64, [Zn4FPFDiv], 21, [9], 1>; // Floating point double square root.
+defm : Zn4WriteResXMMPair<WriteFSqrt64X, [Zn4FPFDiv], 21, [9], 1>; // Floating point double square root (XMM).
+defm : Zn4WriteResYMMPair<WriteFSqrt64Y, [Zn4FPFDiv], 21, [9], 1>; // Floating point double square root (YMM).
+defm : Zn4WriteResZMMPair<WriteFSqrt64Z, [Zn4FPFDiv], 21, [18], 1>; // Floating point double square root (ZMM).
+defm : Zn4WriteResXMMPair<WriteFSqrt80, [Zn4FPFDiv], 22, [23], 1>; // FIXME: latency not from llvm-exegesis // Floating point long double square root.
+defm : Zn4WriteResXMMPair<WriteFRcp, [Zn4FPFMul01], 4, [1], 1>; // Floating point reciprocal estimate.
+defm : Zn4WriteResXMMPair<WriteFRcpX, [Zn4FPFMul01], 4, [1], 1>; // Floating point reciprocal estimate (XMM).
+defm : Zn4WriteResYMMPair<WriteFRcpY, [Zn4FPFMul01], 5, [1], 1>; // Floating point reciprocal estimate (YMM).
+defm : Zn4WriteResZMMPair<WriteFRcpZ, [Zn4FPFMul01], 5, [2], 1>; // Floating point reciprocal estimate (ZMM).
+defm : Zn4WriteResXMMPair<WriteFRsqrt, [Zn4FPFDiv], 4, [1], 1>; // Floating point reciprocal square root estimate.
+defm : Zn4WriteResXMMPair<WriteFRsqrtX, [Zn4FPFDiv], 4, [1], 1>; // Floating point reciprocal square root estimate (XMM).
+defm : Zn4WriteResYMMPair<WriteFRsqrtY, [Zn4FPFDiv], 4, [1], 1>; // Floating point reciprocal square root estimate (YMM).
+defm : Zn4WriteResZMMPair<WriteFRsqrtZ, [Zn4FPFDiv], 5, [2], 1>; // Floating point reciprocal square root estimate (ZMM).
+defm : Zn4WriteResXMMPair<WriteFMA, [Zn4FPFMul01], 4, [2], 1>; // Fused Multiply Add.
+defm : Zn4WriteResXMMPair<WriteFMAX, [Zn4FPFMul01], 4, [2], 1>; // Fused Multiply Add (XMM).
+defm : Zn4WriteResYMMPair<WriteFMAY, [Zn4FPFMul01], 4, [2], 1>; // Fused Multiply Add (YMM).
+defm : Zn4WriteResZMMPair<WriteFMAZ, [Zn4FPFMul01], 4, [4], 1>; // Fused Multiply Add (ZMM).
+defm : Zn4WriteResXMMPair<WriteDPPD, [Zn4FPFMul01], 7, [6], 3, /*LoadUOps=*/2>; // Floating point double dot product.
+defm : Zn4WriteResXMMPair<WriteDPPS, [Zn4FPFMul01], 11, [8], 8, /*LoadUOps=*/2>; // Floating point single dot product.
+defm : Zn4WriteResYMMPair<WriteDPPSY, [Zn4FPFMul01], 11, [8], 7, /*LoadUOps=*/1>; // Floating point single dot product (YMM).
+defm : Zn4WriteResXMMPair<WriteFSign, [Zn4FPFMul01], 1, [2], 1>; // FIXME: latency not from llvm-exegesis // Floating point fabs/fchs.
+defm : Zn4WriteResXMMPair<WriteFRnd, [Zn4FPFCvt01], 3, [1], 1>; // Floating point rounding.
+defm : Zn4WriteResYMMPair<WriteFRndY, [Zn4FPFCvt01], 3, [1], 1>; // Floating point rounding (YMM).
+defm : Zn4WriteResZMMPair<WriteFRndZ, [Zn4FPFCvt01], 3, [2], 1>; // Floating point rounding (ZMM).
+
+defm : Zn4WriteResXMMPair<WriteFLogic, [Zn4FPVMisc0123], 1, [1], 1>; // Floating point and/or/xor logicals.
+defm : Zn4WriteResYMMPair<WriteFLogicY, [Zn4FPVMisc0123], 1, [1], 1>; // Floating point and/or/xor logicals (YMM).
+defm : Zn4WriteResZMMPair<WriteFLogicZ, [Zn4FPVMisc0123], 1, [2], 1>; // Floating point and/or/xor logicals (ZMM).
+defm : Zn4WriteResXMMPair<WriteFTest, [Zn4FPFMisc12], 1, [2], 2>; // FIXME: latency not from llvm-exegesis // Floating point TEST instructions.
+defm : Zn4WriteResYMMPair<WriteFTestY, [Zn4FPFMisc12], 1, [2], 2>; // FIXME: latency not from llvm-exegesis // Floating point TEST instructions (YMM).
+defm : Zn4WriteResZMMPair<WriteFTestZ, [Zn4FPFMisc12], 1, [4], 1>; // FIXME: latency not from llvm-exegesis // Floating point TEST instructions (ZMM).
+defm : Zn4WriteResXMMPair<WriteFShuffle, [Zn4FPVShuf01], 1, [1], 1>; // Floating point vector shuffles.
+defm : Zn4WriteResYMMPair<WriteFShuffleY, [Zn4FPVShuf01], 1, [1], 1>; // Floating point vector shuffles (YMM).
+defm : Zn4WriteResZMMPair<WriteFShuffleZ, [Zn4FPVShuf01], 1, [2], 1>; // Floating point vector shuffles (ZMM).
+defm : Zn4WriteResXMMPair<WriteFVarShuffle, [Zn4FPVShuf01], 3, [1], 1>; // Floating point vector variable shuffles.
+defm : Zn4WriteResYMMPair<WriteFVarShuffleY, [Zn4FPVShuf01], 3, [1], 1>; // Floating point vector variable shuffles (YMM).
+defm : Zn4WriteResZMMPair<WriteFVarShuffleZ, [Zn4FPVShuf01], 3, [2], 1>; // Floating point vector variable shuffles (ZMM).
+defm : Zn4WriteResXMMPair<WriteFBlend, [Zn4FPFMul01], 1, [1], 1>; // Floating point vector blends.
+defm : Zn4WriteResYMMPair<WriteFBlendY, [Zn4FPFMul01], 1, [1], 1>; // Floating point vector blends (YMM).
+defm : Zn4WriteResZMMPair<WriteFBlendZ, [Zn4FPFMul01], 1, [2], 1>; // Floating point vector blends (ZMM).
+defm : Zn4WriteResXMMPair<WriteFVarBlend, [Zn4FPFMul01], 1, [1], 1>; // Fp vector variable blends.
+defm : Zn4WriteResYMMPair<WriteFVarBlendY, [Zn4FPFMul01], 1, [1], 1>; // Fp vector variable blends (YMM).
+defm : Zn4WriteResZMMPair<WriteFVarBlendZ, [Zn4FPFMul01], 1, [2], 1>; // Fp vector variable blends (ZMM).
+
+// Horizontal Add/Sub (float and integer)
+defm : Zn4WriteResXMMPair<WriteFHAdd, [Zn4FPFAdd0], 4, [2], 3>;
+defm : Zn4WriteResYMMPair<WriteFHAddY, [Zn4FPFAdd0], 4, [2], 3, /*LoadUOps=*/1>;
+defm : Zn4WriteResZMMPair<WriteFHAddZ, [Zn4FPFAdd0], 6, [4], 3, /*LoadUOps=*/1>;
+defm : Zn4WriteResXMMPair<WritePHAdd, [Zn4FPVAdd0], 2, [2], 3, /*LoadUOps=*/1>;
+defm : Zn4WriteResXMMPair<WritePHAddX, [Zn4FPVAdd0], 2, [2], 3>;
+defm : Zn4WriteResYMMPair<WritePHAddY, [Zn4FPVAdd0], 3, [3], 3, /*LoadUOps=*/1>;
+defm : Zn4WriteResZMMPair<WritePHAddZ, [Zn4FPVAdd0], 2, [4], 3, /*LoadUOps=*/1>;
+
+// Vector integer operations.
+defm : Zn4WriteResXMM<WriteVecLoad, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecLoadX, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteVecLoadY, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecLoadNT, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteVecLoadNTY, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecMaskedLoad, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteVecMaskedLoadY, [Zn4FPLd01, Zn4Load], !add(Znver4Model.VecLoadLatency, 1), [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecStore, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecStoreX, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+
+def Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr : SchedWriteRes<[Zn4FPFMisc0]> {
+ let Latency = 4;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr], (instrs VEXTRACTF128rr, VEXTRACTI128rr)>;
+
+def Zn4WriteVEXTRACTI128mr : SchedWriteRes<[Zn4FPFMisc0, Zn4FPSt, Zn4Store]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
+ let ResourceCycles = [1, 1, 1];
+ let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 1);
+}
+def : InstRW<[Zn4WriteVEXTRACTI128mr], (instrs VEXTRACTI128mr, VEXTRACTF128mr)>;
+
+def Zn4WriteVINSERTF128rmr : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPFMisc0]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.Latency);
+ let ResourceCycles = [1, 1, 1];
+ let NumMicroOps = !add(Zn4WriteVEXTRACTF128rr_VEXTRACTI128rr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4WriteVINSERTF128rmr], (instrs VINSERTF128rm)>;
+
+defm : Zn4WriteResYMM<WriteVecStoreY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecStoreNT, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResYMM<WriteVecStoreNTY, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [1, 1], 1>;
+defm : Zn4WriteResXMM<WriteVecMaskedStore32, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [6, 1], 18>;
+defm : Zn4WriteResXMM<WriteVecMaskedStore64, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [4, 1], 10>;
+defm : Zn4WriteResYMM<WriteVecMaskedStore32Y, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [12, 1], 42>;
+defm : Zn4WriteResYMM<WriteVecMaskedStore64Y, [Zn4FPSt, Zn4Store], Znver4Model.StoreLatency, [6, 1], 18>;
+
+defm : Zn4WriteResXMM<WriteVecMoveToGpr, [Zn4FPLd01], 1, [2], 1>;
+defm : Zn4WriteResXMM<WriteVecMoveFromGpr, [Zn4FPLd01], 1, [2], 1>;
+
+def Zn4WriteMOVMMX : SchedWriteRes<[Zn4FPLd01, Zn4FPFMisc0123]> {
+ let Latency = 1;
+ let ResourceCycles = [1, 2];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteMOVMMX], (instrs MMX_MOVQ2FR64rr, MMX_MOVQ2DQrr)>;
+
+def Zn4WriteMOVMMXSlow : SchedWriteRes<[Zn4FPLd01, Zn4FPFMisc0123]> {
+ let Latency = 1;
+ let ResourceCycles = [1, 4];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteMOVMMXSlow], (instrs MMX_MOVD64rr, MMX_MOVD64to64rr)>;
+
+defm : Zn4WriteResXMMPair<WriteVecALU, [Zn4FPVAdd0123], 1, [1], 1>; // Vector integer ALU op, no logicals.
+
+def Zn4WriteEXTRQ_INSERTQ : SchedWriteRes<[Zn4FPVShuf01, Zn4FPLd01]> {
+ let Latency = 3;
+ let ResourceCycles = [1, 1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteEXTRQ_INSERTQ], (instrs EXTRQ, INSERTQ)>;
+
+def Zn4WriteEXTRQI_INSERTQI : SchedWriteRes<[Zn4FPVShuf01, Zn4FPLd01]> {
+ let Latency = 3;
+ let ResourceCycles = [1, 1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteEXTRQI_INSERTQI], (instrs EXTRQI, INSERTQI)>;
+
+defm : Zn4WriteResXMMPair<WriteVecALUX, [Zn4FPVAdd0123], 1, [1], 1>; // Vector integer ALU op, no logicals (XMM).
+
+def Zn4WriteVecALUXSlow : SchedWriteRes<[Zn4FPVAdd01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecALUXSlow], (instrs PABSBrr, PABSDrr, PABSWrr,
+ PADDSBrr, PADDSWrr, PADDUSBrr, PADDUSWrr,
+ PAVGBrr, PAVGWrr,
+ PSIGNBrr, PSIGNDrr, PSIGNWrr,
+ VPABSBrr, VPABSDrr, VPABSWrr,
+ VPADDSBrr, VPADDSWrr, VPADDUSBrr, VPADDUSWrr,
+ VPAVGBrr, VPAVGWrr,
+ VPCMPEQQrr,
+ VPSIGNBrr, VPSIGNDrr, VPSIGNWrr,
+ PSUBSBrr, PSUBSWrr, PSUBUSBrr, PSUBUSWrr, VPSUBSBrr, VPSUBSWrr, VPSUBUSBrr, VPSUBUSWrr)>;
+
+def Zn4WriteVecOpMask : SchedWriteRes<[Zn4FPOpMask01]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecOpMask], (instrs KADDBrr, KADDDrr, KADDQrr, KADDWrr,
+ KANDBrr, KANDDrr, KANDQrr, KANDWrr,
+ KANDNBrr, KANDNDrr, KANDNQrr, KANDNWrr,
+ KMOVBkk, KMOVDkk, KMOVQkk, KMOVWkk,
+ KMOVBrk, KMOVDrk, KMOVQrk, KMOVWrk,
+ KNOTBrr, KNOTDrr, KNOTQrr, KNOTWrr,
+ KORBrr, KORDrr, KORQrr, KORWrr,
+ KORTESTBrr, KORTESTDrr, KORTESTQrr, KORTESTWrr,
+ KTESTBrr, KTESTDrr, KTESTQrr, KTESTWrr,
+ KUNPCKBWrr, KUNPCKDQrr, KUNPCKWDrr,
+ KXNORBrr, KXNORDrr, KXNORQrr, KXNORWrr,
+ KXORBrr, KXORDrr, KXORQrr, KXORWrr)>;
+
+def Zn4WriteVecOpMaskMemMov : SchedWriteRes<[Zn4FPOpMask4]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecOpMaskMemMov], (instrs KMOVBmk, KMOVDmk, KMOVQmk, KMOVWmk)>;
+
+def Zn4WriteVecOpMaskKRMov : SchedWriteRes<[Zn4FPOpMask4]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecOpMaskKRMov], (instrs KMOVBkr, KMOVDkr, KMOVQkr, KMOVWkr)>;
+
+def Zn4WriteVecALU2Slow : SchedWriteRes<[Zn4FPVAdd12]> {
+ // TODO: All align instructions are expected to have a 4-cycle latency
+ let Latency = 4;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecALU2Slow], (instrs VALIGNDZrri, VALIGNDZ128rri, VALIGNDZ256rri,
+ VALIGNQZrri, VALIGNQZ128rri, VALIGNQZ256rri)
+ >;
+defm : Zn4WriteResYMMPair<WriteVecALUY, [Zn4FPVAdd0123], 1, [1], 1>; // Vector integer ALU op, no logicals (YMM).
+
+def Zn4WriteVecALUYSlow : SchedWriteRes<[Zn4FPVAdd01]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVecALUYSlow], (instrs VPABSBYrr, VPABSDYrr, VPABSWYrr,
+ VPADDSBYrr, VPADDSWYrr, VPADDUSBYrr, VPADDUSWYrr,
+ VPSUBSBYrr, VPSUBSWYrr, VPSUBUSBYrr, VPSUBUSWYrr,
+ VPAVGBYrr, VPAVGWYrr,
+ VPCMPEQQYrr,
+ VPSIGNBYrr, VPSIGNDYrr, VPSIGNWYrr)>;
+
+defm : Zn4WriteResZMMPair<WriteVecALUZ, [Zn4FPVAdd0123], 1, [2], 1>; // Vector integer ALU op, no logicals (ZMM).
+
+defm : Zn4WriteResXMMPair<WriteVecLogic, [Zn4FPVMisc0123], 1, [1], 1>; // Vector integer and/or/xor logicals.
+defm : Zn4WriteResXMMPair<WriteVecLogicX, [Zn4FPVMisc0123], 1, [1], 1>; // Vector integer and/or/xor logicals (XMM).
+defm : Zn4WriteResYMMPair<WriteVecLogicY, [Zn4FPVMisc0123], 1, [1], 1>; // Vector integer and/or/xor logicals (YMM).
+defm : Zn4WriteResZMMPair<WriteVecLogicZ, [Zn4FPVMisc0123], 1, [2], 1>; // Vector integer and/or/xor logicals (ZMM).
+defm : Zn4WriteResXMMPair<WriteVecTest, [Zn4FPVAdd12, Zn4FPSt], 1, [1, 1], 2>; // FIXME: latency not from llvm-exegesis // Vector integer TEST instructions.
+defm : Zn4WriteResYMMPair<WriteVecTestY, [Zn4FPVAdd12, Zn4FPSt], 1, [1, 1], 2>; // FIXME: latency not from llvm-exegesis // Vector integer TEST instructions (YMM).
+defm : Zn4WriteResZMMPair<WriteVecTestZ, [Zn4FPVAdd12, Zn4FPSt], 1, [2, 2], 2>; // FIXME: latency not from llvm-exegesis // Vector integer TEST instructions (ZMM).
+defm : Zn4WriteResXMMPair<WriteVecShift, [Zn4FPVShift01], 1, [1], 1>; // Vector integer shifts (default).
+defm : Zn4WriteResXMMPair<WriteVecShiftX, [Zn4FPVShift01], 2, [2], 1>; // Vector integer shifts (XMM).
+defm : Zn4WriteResYMMPair<WriteVecShiftY, [Zn4FPVShift01], 1, [1], 1>; // Vector integer shifts (YMM).
+defm : Zn4WriteResZMMPair<WriteVecShiftZ, [Zn4FPVShift01], 1, [2], 1>; // Vector integer shifts (ZMM).
+defm : Zn4WriteResXMMPair<WriteVecShiftImm, [Zn4FPVShift01], 1, [1], 1>; // Vector integer immediate shifts (default).
+defm : Zn4WriteResXMMPair<WriteVecShiftImmX, [Zn4FPVShift01], 1, [1], 1>; // Vector integer immediate shifts (XMM).
+defm : Zn4WriteResYMMPair<WriteVecShiftImmY, [Zn4FPVShift01], 1, [1], 1>; // Vector integer immediate shifts (YMM).
+defm : Zn4WriteResZMMPair<WriteVecShiftImmZ, [Zn4FPVShift01], 1, [2], 1>; // Vector integer immediate shifts (ZMM).
+defm : Zn4WriteResXMMPair<WriteVecIMul, [Zn4FPVMul01], 3, [1], 1>; // Vector integer multiply (default).
+defm : Zn4WriteResXMMPair<WriteVecIMulX, [Zn4FPVMul01], 3, [1], 1>; // Vector integer multiply (XMM).
+defm : Zn4WriteResYMMPair<WriteVecIMulY, [Zn4FPVMul01], 3, [1], 1>; // Vector integer multiply (YMM).
+defm : Zn4WriteResZMMPair<WriteVecIMulZ, [Zn4FPVMul01], 3, [2], 1>; // Vector integer multiply (ZMM).
+defm : Zn4WriteResXMMPair<WritePMULLD, [Zn4FPVMul01], 3, [1], 1>; // Vector PMULLD.
+defm : Zn4WriteResYMMPair<WritePMULLDY, [Zn4FPVMul01], 3, [1], 1>; // Vector PMULLD (YMM).
+defm : Zn4WriteResZMMPair<WritePMULLDZ, [Zn4FPVMul01], 3, [2], 1>; // Vector PMULLD (ZMM).
+defm : Zn4WriteResXMMPair<WriteShuffle, [Zn4FPVShuf01], 1, [1], 1>; // Vector shuffles.
+defm : Zn4WriteResXMMPair<WriteShuffleX, [Zn4FPVShuf01], 1, [1], 1>; // Vector shuffles (XMM).
+defm : Zn4WriteResYMMPair<WriteShuffleY, [Zn4FPVShuf01], 1, [1], 1>; // Vector shuffles (YMM).
+defm : Zn4WriteResZMMPair<WriteShuffleZ, [Zn4FPVShuf01], 1, [2], 1>; // Vector shuffles (ZMM).
+defm : Zn4WriteResXMMPair<WriteVarShuffle, [Zn4FPVShuf01], 1, [1], 1>; // Vector variable shuffles.
+defm : Zn4WriteResXMMPair<WriteVarShuffleX, [Zn4FPVShuf01], 1, [1], 1>; // Vector variable shuffles (XMM).
+defm : Zn4WriteResYMMPair<WriteVarShuffleY, [Zn4FPVShuf01], 1, [1], 1>; // Vector variable shuffles (YMM).
+defm : Zn4WriteResZMMPair<WriteVarShuffleZ, [Zn4FPVShuf01], 1, [2], 1>; // Vector variable shuffles (ZMM).
+defm : Zn4WriteResXMMPair<WriteBlend, [Zn4FPVMisc0123], 1, [1], 1>; // Vector blends.
+defm : Zn4WriteResYMMPair<WriteBlendY, [Zn4FPVMisc0123], 1, [1], 1>; // Vector blends (YMM).
+defm : Zn4WriteResZMMPair<WriteBlendZ, [Zn4FPVMisc0123], 1, [2], 1>; // Vector blends (ZMM).
+defm : Zn4WriteResXMMPair<WriteVarBlend, [Zn4FPVMul01], 1, [1], 1>; // Vector variable blends.
+defm : Zn4WriteResYMMPair<WriteVarBlendY, [Zn4FPVMul01], 1, [1], 1>; // Vector variable blends (YMM).
+defm : Zn4WriteResZMMPair<WriteVarBlendZ, [Zn4FPVMul01], 1, [2], 1>; // Vector variable blends (ZMM).
+defm : Zn4WriteResXMMPair<WritePSADBW, [Zn4FPVAdd0123], 3, [2], 1>; // Vector PSADBW.
+defm : Zn4WriteResXMMPair<WritePSADBWX, [Zn4FPVAdd0123], 3, [2], 1>; // Vector PSADBW (XMM).
+defm : Zn4WriteResYMMPair<WritePSADBWY, [Zn4FPVAdd0123], 3, [2], 1>; // Vector PSADBW (YMM).
+defm : Zn4WriteResZMMPair<WritePSADBWZ, [Zn4FPVAdd0123], 4, [4], 1>; // Vector PSADBW (ZMM).
+defm : Zn4WriteResXMMPair<WriteMPSAD, [Zn4FPVAdd0123], 4, [8], 4, /*LoadUOps=*/2>; // Vector MPSAD.
+defm : Zn4WriteResYMMPair<WriteMPSADY, [Zn4FPVAdd0123], 4, [8], 3, /*LoadUOps=*/1>; // Vector MPSAD (YMM).
+defm : Zn4WriteResZMMPair<WriteMPSADZ, [Zn4FPVAdd0123], 4, [16], 3, /*LoadUOps=*/1>; // Vector MPSAD (ZMM).
+defm : Zn4WriteResXMMPair<WritePHMINPOS, [Zn4FPVAdd01], 3, [1], 1>; // Vector PHMINPOS.
+
+// Vector insert/extract operations.
+defm : Zn4WriteResXMMPair<WriteVecInsert, [Zn4FPLd01], 1, [2], 2, /*LoadUOps=*/-1>; // Insert gpr to vector element.
+defm : Zn4WriteResXMM<WriteVecExtract, [Zn4FPLd01], 1, [2], 2>; // Extract vector element to gpr.
+defm : Zn4WriteResXMM<WriteVecExtractSt, [Zn4FPSt, Zn4Store], !add(1, Znver4Model.StoreLatency), [1, 1], 2>; // Extract vector element and store.
+
+// MOVMSK operations.
+defm : Zn4WriteResXMM<WriteFMOVMSK, [Zn4FPVMisc2], 1, [1], 1>;
+defm : Zn4WriteResXMM<WriteVecMOVMSK, [Zn4FPVMisc2], 1, [1], 1>;
+defm : Zn4WriteResYMM<WriteVecMOVMSKY, [Zn4FPVMisc2], 1, [1], 1>;
+defm : Zn4WriteResXMM<WriteMMXMOVMSK, [Zn4FPVMisc2], 1, [1], 1>;
+
+// Conversion between integer and float.
+defm : Zn4WriteResXMMPair<WriteCvtSD2I, [Zn4FPFCvt01], 1, [1], 1>; // Double -> Integer.
+defm : Zn4WriteResXMMPair<WriteCvtPD2I, [Zn4FPFCvt01], 3, [2], 1>; // Double -> Integer (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtPD2IY, [Zn4FPFCvt01], 3, [2], 2>; // Double -> Integer (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtPD2IZ, [Zn4FPFCvt01], 3, [4], 2>; // Double -> Integer (ZMM).
+
+def Zn4WriteCvtPD2IMMX : SchedWriteRes<[Zn4FPFCvt01]> {
+ let Latency = 1;
+ let ResourceCycles = [2];
+ let NumMicroOps = 2;
+}
+defm : Zn4WriteResXMMPair<WriteCvtSS2I, [Zn4FPFCvt01], 5, [5], 2>; // Float -> Integer.
+
+defm : Zn4WriteResXMMPair<WriteCvtPS2I, [Zn4FPFCvt01], 3, [1], 1>; // Float -> Integer (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtPS2IY, [Zn4FPFCvt01], 4, [1], 1>; // Float -> Integer (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtPS2IZ, [Zn4FPFCvt01], 4, [2], 2>; // Float -> Integer (ZMM).
+
+defm : Zn4WriteResXMMPair<WriteCvtI2SD, [Zn4FPFCvt01], 4, [2], 2, /*LoadUOps=*/-1>; // Integer -> Double.
+defm : Zn4WriteResXMMPair<WriteCvtI2PD, [Zn4FPFCvt01], 3, [1], 1>; // Integer -> Double (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtI2PDY, [Zn4FPFCvt01], 3, [2], 2, /*LoadUOps=*/-1>; // Integer -> Double (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtI2PDZ, [Zn4FPFCvt01], 4, [4], 4, /*LoadUOps=*/-1>; // Integer -> Double (ZMM).
+
+def Zn4WriteCvtI2PDMMX : SchedWriteRes<[Zn4FPFCvt01]> {
+ let Latency = 2;
+ let ResourceCycles = [6];
+ let NumMicroOps = 2;
+}
+
+defm : Zn4WriteResXMMPair<WriteCvtI2SS, [Zn4FPFCvt01], 3, [2], 2, /*LoadUOps=*/-1>; // Integer -> Float.
+defm : Zn4WriteResXMMPair<WriteCvtI2PS, [Zn4FPFCvt01], 3, [1], 1>; // Integer -> Float (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtI2PSY, [Zn4FPFCvt01], 3, [1], 1>; // Integer -> Float (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtI2PSZ, [Zn4FPFCvt01], 3, [2], 2>; // Integer -> Float (ZMM).
+
+def Zn4WriteCvtI2PSMMX : SchedWriteRes<[Zn4FPFCvt01]> {
+ let Latency = 3;
+ let ResourceCycles = [1];
+ let NumMicroOps = 2;
+}
+
+defm : Zn4WriteResXMMPair<WriteCvtSS2SD, [Zn4FPFCvt01], 3, [1], 1>; // Float -> Double size conversion.
+defm : Zn4WriteResXMMPair<WriteCvtPS2PD, [Zn4FPFCvt01], 3, [1], 1>; // Float -> Double size conversion (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtPS2PDY, [Zn4FPFCvt01], 4, [2], 2, /*LoadUOps=*/-1>; // Float -> Double size conversion (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtPS2PDZ, [Zn4FPFCvt01], 6, [4], 4, /*LoadUOps=*/-1>; // Float -> Double size conversion (ZMM).
+
+defm : Zn4WriteResXMMPair<WriteCvtSD2SS, [Zn4FPFCvt01], 3, [1], 1>; // Double -> Float size conversion.
+defm : Zn4WriteResXMMPair<WriteCvtPD2PS, [Zn4FPFCvt01], 3, [1], 1>; // Double -> Float size conversion (XMM).
+defm : Zn4WriteResYMMPair<WriteCvtPD2PSY, [Zn4FPFCvt01], 6, [2], 2>; // Double -> Float size conversion (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtPD2PSZ, [Zn4FPFCvt01], 6, [4], 4>; // Double -> Float size conversion (ZMM).
+
+defm : Zn4WriteResXMMPair<WriteCvtPH2PS, [Zn4FPFCvt01], 3, [1], 1>; // Half -> Float size conversion.
+defm : Zn4WriteResYMMPair<WriteCvtPH2PSY, [Zn4FPFCvt01], 4, [2], 2, /*LoadUOps=*/-1>; // Half -> Float size conversion (YMM).
+defm : Zn4WriteResZMMPair<WriteCvtPH2PSZ, [Zn4FPFCvt01], 4, [4], 4, /*LoadUOps=*/-1>; // Half -> Float size conversion (ZMM).
+
+defm : Zn4WriteResXMM<WriteCvtPS2PH, [Zn4FPFCvt01], 3, [2], 1>; // Float -> Half size conversion.
+defm : Zn4WriteResYMM<WriteCvtPS2PHY, [Zn4FPFCvt01], 6, [2], 2>; // Float -> Half size conversion (YMM).
+defm : Zn4WriteResZMM<WriteCvtPS2PHZ, [Zn4FPFCvt01], 6, [2], 2>; // Float -> Half size conversion (ZMM).
+
+defm : Zn4WriteResXMM<WriteCvtPS2PHSt, [Zn4FPFCvt01, Zn4FPSt, Zn4Store], !add(3, Znver4Model.StoreLatency), [1, 1, 1], 2>; // Float -> Half + store size conversion.
+defm : Zn4WriteResYMM<WriteCvtPS2PHYSt, [Zn4FPFCvt01, Zn4FPSt, Zn4Store], !add(6, Znver4Model.StoreLatency), [2, 1, 1], 3>; // Float -> Half + store size conversion (YMM).
+defm : Zn4WriteResYMM<WriteCvtPS2PHZSt, [Zn4FPFCvt01, Zn4FPSt, Zn4Store], !add(6, Znver4Model.StoreLatency), [2, 1, 1], 3>; // Float -> Half + store size conversion (ZMM).
+
+// CRC32 instruction.
+defm : Zn4WriteResIntPair<WriteCRC32, [Zn4ALU1], 3, [1], 1>;
+
+def Zn4WriteSHA1MSG1rr : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteSHA1MSG1rr], (instrs SHA1MSG1rr)>;
+
+def Zn4WriteSHA1MSG1rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteSHA1MSG1rr.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteSHA1MSG1rr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4WriteSHA1MSG1rm], (instrs SHA1MSG1rm)>;
+
+def Zn4WriteSHA1MSG2rr_SHA1NEXTErr : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 1;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSHA1MSG2rr_SHA1NEXTErr], (instrs SHA1MSG2rr, SHA1NEXTErr)>;
+
+def Zn4Writerm_SHA1MSG2rm_SHA1NEXTErm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteSHA1MSG2rr_SHA1NEXTErr.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteSHA1MSG2rr_SHA1NEXTErr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4Writerm_SHA1MSG2rm_SHA1NEXTErm], (instrs SHA1MSG2rm, SHA1NEXTErm)>;
+
+def Zn4WriteSHA256MSG1rr : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 2;
+ let ResourceCycles = [3];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteSHA256MSG1rr], (instrs SHA256MSG1rr)>;
+
+def Zn4Writerm_SHA256MSG1rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteSHA256MSG1rr.Latency);
+ let ResourceCycles = [1, 1, 3];
+ let NumMicroOps = !add(Zn4WriteSHA256MSG1rr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4Writerm_SHA256MSG1rm], (instrs SHA256MSG1rm)>;
+
+def Zn4WriteSHA256MSG2rr : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 3;
+ let ResourceCycles = [8];
+ let NumMicroOps = 4;
+}
+def : InstRW<[Zn4WriteSHA256MSG2rr], (instrs SHA256MSG2rr)>;
+
+def Zn4WriteSHA256MSG2rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPU0123]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteSHA256MSG2rr.Latency);
+ let ResourceCycles = [1, 1, 8];
+ let NumMicroOps = !add(Zn4WriteSHA256MSG2rr.NumMicroOps, 1);
+}
+def : InstRW<[Zn4WriteSHA256MSG2rm], (instrs SHA256MSG2rm)>;
+
+def Zn4WriteSHA1RNDS4rri : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 6;
+ let ResourceCycles = [8];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSHA1RNDS4rri], (instrs SHA1RNDS4rri)>;
+
+def Zn4WriteSHA256RNDS2rr : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 4;
+ let ResourceCycles = [8];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSHA256RNDS2rr], (instrs SHA256RNDS2rr)>;
+
+// Strings instructions.
+// Packed Compare Implicit Length Strings, Return Mask
+defm : Zn4WriteResXMMPair<WritePCmpIStrM, [Zn4FPVAdd0123], 6, [8], 3, /*LoadUOps=*/1>;
+// Packed Compare Explicit Length Strings, Return Mask
+defm : Zn4WriteResXMMPair<WritePCmpEStrM, [Zn4FPVAdd0123], 6, [12], 7, /*LoadUOps=*/5>;
+// Packed Compare Implicit Length Strings, Return Index
+defm : Zn4WriteResXMMPair<WritePCmpIStrI, [Zn4FPVAdd0123], 2, [8], 4>;
+// Packed Compare Explicit Length Strings, Return Index
+defm : Zn4WriteResXMMPair<WritePCmpEStrI, [Zn4FPVAdd0123], 6, [12], 8, /*LoadUOps=*/4>;
+
+// AES instructions.
+defm : Zn4WriteResXMMPair<WriteAESDecEnc, [Zn4FPAES01], 4, [1], 1>; // Decryption, encryption.
+defm : Zn4WriteResXMMPair<WriteAESIMC, [Zn4FPAES01], 4, [1], 1>; // InvMixColumn.
+defm : Zn4WriteResXMMPair<WriteAESKeyGen, [Zn4FPAES01], 4, [1], 1>; // Key Generation.
+
+// Carry-less multiplication instructions.
+defm : Zn4WriteResXMMPair<WriteCLMul, [Zn4FPCLM01], 4, [4], 4>;
+
+// EMMS/FEMMS
+defm : Zn4WriteResInt<WriteEMMS, [Zn4ALU0123], 2, [1], 1>; // FIXME: latency not from llvm-exegesis
+
+// Load/store MXCSR
+defm : Zn4WriteResInt<WriteLDMXCSR, [Zn4AGU012, Zn4Load, Zn4ALU0123], !add(Znver4Model.LoadLatency, 1), [1, 1, 6], 1>; // FIXME: latency not from llvm-exegesis
+defm : Zn4WriteResInt<WriteSTMXCSR, [Zn4ALU0123, Zn4AGU012, Zn4Store], !add(1, Znver4Model.StoreLatency), [60, 1, 1], 2>; // FIXME: latency not from llvm-exegesis
+
+// Catch-all for expensive system instructions.
+defm : Zn4WriteResInt<WriteSystem, [Zn4ALU0123], 100, [100], 100>;
+
+def Zn4WriteVZEROUPPER : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 0; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVZEROUPPER], (instrs VZEROUPPER)>;
+
+def Zn4WriteVZEROALL : SchedWriteRes<[Zn4FPU0123]> {
+ let Latency = 10; // FIXME: not from llvm-exegesis
+ let ResourceCycles = [24];
+ let NumMicroOps = 18;
+}
+def : InstRW<[Zn4WriteVZEROALL], (instrs VZEROALL)>;
+
+// AVX2.
+defm : Zn4WriteResYMMPair<WriteFShuffle256, [Zn4FPVShuf], 2, [1], 1, /*LoadUOps=*/2>; // Fp 256-bit width vector shuffles.
+defm : Zn4WriteResYMMPair<WriteFVarShuffle256, [Zn4FPVShuf], 7, [1], 2, /*LoadUOps=*/1>; // Fp 256-bit width variable shuffles.
+defm : Zn4WriteResYMMPair<WriteShuffle256, [Zn4FPVShuf], 1, [1], 1>; // 256-bit width vector shuffles.
+
+def Zn4WriteVPERM2I128rr_VPERM2F128rr : SchedWriteRes<[Zn4FPVShuf]> {
+ let Latency = 3;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVPERM2I128rr_VPERM2F128rr], (instrs VPERM2I128rr, VPERM2F128rr)>;
+
+def Zn4WriteVPERM2F128rm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERM2I128rr_VPERM2F128rr.Latency);
+ let ResourceCycles = [1, 1, 1];
+ let NumMicroOps = !add(Zn4WriteVPERM2I128rr_VPERM2F128rr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4WriteVPERM2F128rm], (instrs VPERM2F128rm)>;
+
+def Zn4WriteVPERMPSYrr : SchedWriteRes<[Zn4FPVShuf]> {
+ let Latency = 7;
+ let ResourceCycles = [1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteVPERMPSYrr], (instrs VPERMPSYrr)>;
+
+def Zn4WriteVPERMPSYrm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERMPSYrr.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteVPERMPSYrr.NumMicroOps, 1);
+}
+def : InstRW<[Zn4WriteVPERMPSYrm], (instrs VPERMPSYrm)>;
+
+def Zn4WriteVPERMYri : SchedWriteRes<[Zn4FPVShuf]> {
+ let Latency = 6;
+ let ResourceCycles = [1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteVPERMYri], (instrs VPERMPDYri, VPERMQYri)>;
+
+def Zn4WriteVPERMPDYmi : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERMYri.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteVPERMYri.NumMicroOps, 1);
+}
+def : InstRW<[Zn4WriteVPERMPDYmi], (instrs VPERMPDYmi)>;
+
+def Zn4WriteVPERMDYrr : SchedWriteRes<[Zn4FPVShuf]> {
+ let Latency = 5;
+ let ResourceCycles = [1];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteVPERMDYrr], (instrs VPERMDYrr)>;
+
+def Zn4WriteVPERMYm : SchedWriteRes<[Zn4AGU012, Zn4Load, Zn4FPVShuf]> {
+ let Latency = !add(Znver4Model.LoadLatency, Zn4WriteVPERMDYrr.Latency);
+ let ResourceCycles = [1, 1, 2];
+ let NumMicroOps = !add(Zn4WriteVPERMDYrr.NumMicroOps, 0);
+}
+def : InstRW<[Zn4WriteVPERMYm], (instrs VPERMQYmi, VPERMDYrm)>;
+
+defm : Zn4WriteResYMMPair<WriteVPMOV256, [Zn4FPVShuf01], 4, [3], 2, /*LoadUOps=*/-1>; // 256-bit width packed vector width-changing move.
+defm : Zn4WriteResYMMPair<WriteVarShuffle256, [Zn4FPVShuf01], 1, [1], 2>; // 256-bit width vector variable shuffles.
+defm : Zn4WriteResXMMPair<WriteVarVecShift, [Zn4FPVShift01], 1, [1], 1>; // Variable vector shifts.
+defm : Zn4WriteResYMMPair<WriteVarVecShiftY, [Zn4FPVShift01], 1, [1], 1>; // Variable vector shifts (YMM).
+defm : Zn4WriteResZMMPair<WriteVarVecShiftZ, [Zn4FPVShift01], 1, [2], 2>; // Variable vector shifts (ZMM).
+
+// Old microcoded instructions that nobody uses.
+defm : Zn4WriteResInt<WriteMicrocoded, [Zn4ALU0123], 100, [100], 100>;
+
+// Fence instructions.
+defm : Zn4WriteResInt<WriteFence, [Zn4ALU0123], 1, [100], 1>;
+
+def Zn4WriteLFENCE : SchedWriteRes<[Zn4LSU]> {
+ let Latency = 1;
+ let ResourceCycles = [30];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteLFENCE], (instrs LFENCE)>;
+
+def Zn4WriteSFENCE : SchedWriteRes<[Zn4LSU]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSFENCE], (instrs SFENCE)>;
+
+// Nop, not very useful except that it provides a model for nops!
+defm : Zn4WriteResInt<WriteNop, [Zn4ALU0123], 0, [1], 1>; // FIXME: latency not from llvm-exegesis
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Zero Cycle Move
+///////////////////////////////////////////////////////////////////////////////
+
+def Zn4WriteZeroLatency : SchedWriteRes<[]> {
+ let Latency = 0;
+ let ResourceCycles = [];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteZeroLatency], (instrs MOV32rr, MOV32rr_REV,
+ MOV64rr, MOV64rr_REV,
+ MOVSX32rr32)>;
+
+def Zn4WriteSwapRenameable : SchedWriteRes<[]> {
+ let Latency = 0;
+ let ResourceCycles = [];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteSwapRenameable], (instrs XCHG32rr, XCHG32ar,
+ XCHG64rr, XCHG64ar)>;
+
+defm : Zn4WriteResInt<WriteXCHG, [Zn4ALU0123], 0, [8], 2>; // Compare+Exchange - TODO RMW support.
+
+defm : Zn4WriteResXMM<WriteFMoveX, [], 0, [], 1>;
+defm : Zn4WriteResYMM<WriteFMoveY, [], 0, [], 1>;
+defm : Zn4WriteResYMM<WriteFMoveZ, [], 0, [], 1>;
+
+defm : Zn4WriteResXMM<WriteVecMove, [Zn4FPFMisc0123], 1, [1], 1>; // MMX
+defm : Zn4WriteResXMM<WriteVecMoveX, [], 0, [], 1>;
+defm : Zn4WriteResYMM<WriteVecMoveY, [], 0, [], 1>;
+defm : Zn4WriteResYMM<WriteVecMoveZ, [], 0, [], 1>;
+
+def : IsOptimizableRegisterMove<[
+ InstructionEquivalenceClass<[
+ // GPR variants.
+ MOV32rr, MOV32rr_REV,
+ MOV64rr, MOV64rr_REV,
+ MOVSX32rr32,
+ XCHG32rr, XCHG32ar,
+ XCHG64rr, XCHG64ar,
+
+ // MMX variants.
+ // MMX moves are *NOT* eliminated.
+
+ // SSE variants.
+ MOVAPSrr, MOVAPSrr_REV,
+ MOVUPSrr, MOVUPSrr_REV,
+ MOVAPDrr, MOVAPDrr_REV,
+ MOVUPDrr, MOVUPDrr_REV,
+ MOVDQArr, MOVDQArr_REV,
+ MOVDQUrr, MOVDQUrr_REV,
+
+ // AVX variants.
+ VMOVAPSrr, VMOVAPSrr_REV,
+ VMOVUPSrr, VMOVUPSrr_REV,
+ VMOVAPDrr, VMOVAPDrr_REV,
+ VMOVUPDrr, VMOVUPDrr_REV,
+ VMOVDQArr, VMOVDQArr_REV,
+ VMOVDQUrr, VMOVDQUrr_REV,
+
+ // AVX YMM variants.
+ VMOVAPSYrr, VMOVAPSYrr_REV,
+ VMOVUPSYrr, VMOVUPSYrr_REV,
+ VMOVAPDYrr, VMOVAPDYrr_REV,
+ VMOVUPDYrr, VMOVUPDYrr_REV,
+ VMOVDQAYrr, VMOVDQAYrr_REV,
+ VMOVDQUYrr, VMOVDQUYrr_REV,
+ ], TruePred >
+]>;
+
+// FIXUP and RANGE Instructions
+def Zn4WriteVFIXUPIMMPDZrr_VRANGESDrr : SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteVFIXUPIMMPDZrr_VRANGESDrr], (instregex
+ "VFIXUPIMM(S|P)(S|D)(Z|Z128|Z256?)rrik", "VFIXUPIMM(S|P)(S|D)(Z?|Z128?|Z256?)rrikz",
+ "VFIXUPIMM(S|P)(S|D)(Z128|Z256?)rri", "VRANGE(S|P)(S|D)(Z?|Z128?|Z256?)rri(b?)",
+ "VRANGE(S|P)(S|D)(Z|Z128|Z256?)rri(b?)k","VRANGE(S|P)(S|D)(Z?|Z128?|Z256?)rri(b?)kz"
+ )>;
+
+// SCALE & REDUCE instructions
+def Zn4WriteSCALErr: SchedWriteRes<[Zn4FPFMisc23]> {
+ let Latency = 6;
+ let ResourceCycles = [6];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteSCALErr], (instregex
+ "V(SCALEF|REDUCE)(S|P)(S|D)(Z?|Z128?|Z256?)(rr|rrb|rrkz|rrik|rrikz|rri)(_Int?|_Intkz?)",
+ "(V?)REDUCE(PD|PS|SD|SS)(Z?|Z128?)(rri|rrikz|rrib)"
+ )>;
+
+// BF16PS Instructions
+def Zn4WriteBF16: SchedWriteRes<[Zn4FPFMisc23]> {
+ let Latency = 6;
+ let ResourceCycles = [6];
+ let NumMicroOps = 2;
+}
+def : InstRW<[Zn4WriteBF16], (instregex
+ "(V?)DPBF16PS(Z?|Z128?|Z256?)(r|rk|rkz)"
+ )>;
+
+// BUSD and VPMADD Instructions
+def Zn4WriteBUSDr_VPMADDr: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 4;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteBUSDr_VPMADDr], (instregex
+ "VPDP(BU|WS)(S|P)(S|D|DS)(Z|Z128|Z256)(r|rk|rkz)",
+ "VPMADD52(H|L)UQ(Z|Z128|Z256)(r|rk|rkz)"
+ )>;
+
+// SHIFT instructions
+def Zn4WriteSHIFTrr: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSHIFTrr], (instregex
+ "VP(LZCNT|SHLD|SHRD?)(D|Q|W|VD|VQ|VW?)(Z?|Z128?|Z256?)(rr|rk|rrk|rrkz|rri|rrik|rrikz)",
+ "(V?)P(SLL|SRL|SRA)(D|Q|W|DQ)(Y?|Z?|Z128?|Z256?)(rr|rrk|rrkz)",
+ "(V?)P(SLL|SRL|SRA)DQYri",
+ "(V?)P(SLL|SRL)DQ(Z?|Z256?)ri",
+ "(V?)P(SHUFB)(Y|Z|Z128|Z256?)(rr|rrk|rrkz)",
+ "(V?)P(ROL|ROR)(D|Q|VD|VQ)(Z?|Z128?|Z256?)(rr|rrk|rrkz)",
+ "(V?)P(ROL|ROR)(D|Q|VD|VQ)(Z256?)(ri|rik|rikz)",
+ "(V?)P(ROL|ROR)(D|Q)(Z?|Z128?)(ri|rik|rikz)",
+ "VPSHUFBITQMBZ128rr", "VFMSUB231SSZr_Intkz"
+ )>;
+
+def Zn4WriteSHIFTri: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 1;
+ let ResourceCycles = [1];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteSHIFTri], (instregex
+ "VP(SLL|SRL|SRA)(D|Q|W)(Z|Z128|Z256?)(ri|rik|rikz)"
+ )>;
+
+// ALIGN Instructions
+def Zn4WriteALIGN: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteALIGN], (instregex
+ "(V?)PALIGNR(Z?|Z128?|Z256?)(rri|rrik|rrikz)"
+ )>;
+
+// PACK Instructions
+def Zn4WritePACK: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WritePACK], (instregex
+ "(V?)PACK(SS|US)(DW|WB)(Z?|Z128?|Z256?)(rr|rrk|rrkz)"
+ )>;
+
+// MAX and MIN Instructions
+def Zn4WriteFCmp64: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4WriteFCmp64], (instregex
+ "(V?)CMP(S|P)(S|D)(rr|rri|rr_Int)",
+ "(V?|VP?)(MAX|MIN|MINC|MAXC)(S|P|U)(S|D|Q)(Z?|Z128?|Z256?)(rr|rri|rrk|rrkz)(_Int?)",
+ "VP(MAX|MIN)(SQ|UQ)(Z|Z128|Z256)(rr|rrk|rrkz)",
+ "(V?)(MAX|MAXC|MIN|MINC)PD(Z|Z128|Z256?)(rr|rrk|rrkz)"
+ )>;
+
+// MOV Instructions
+def Zn4MOVS: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4MOVS], (instregex
+ "(V?)PMOV(SX|ZX)(BD|BQ|BW|WD|WQ|DQ)(Z128?|Z256?)(rr|rrk|rrkz)",
+ "(V?)PMOV(SX|QD|UZ|ZX)(BD|BQ|BW?)(Y|Z128?)(rr|rrk|rrkz)",
+ "(V?)PMOV(SX|US|ZX)(DQ|WD|QW|WQ?)(Y|Z128?)(rr|rrk|rrkz)",
+ "(V?)VMOVDDUP(Z|Z128|Z256)(rr|rrk|rrkz)",
+ "VPMOV(DB|DW|QB|QD|QW|SDB|SDW|SQB|SQD|SQW|SWB|USDB|USDW|USQB|USQD|USWB|WB)(Z128?)(rr|rrk|rrkz)"
+ )>;
+
+def Zn4MOVSZ: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 4;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4MOVSZ], (instregex
+ "(V?)PMOV(SX|ZX)(BD|BQ|BW|WD|WQ|DQ)(Z?)(rr|rrk|rrkz)"
+ )>;
+
+def Zn4MOVSrr: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 5;
+ let ResourceCycles = [5];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4MOVSrr], (instregex
+ "(V?)PMOV(DB|QB|QW|SDB|SQB|SQW|USDB|USQB|USQW)(Z?)(rr|rrk|rrkz)"
+ )>;
+
+
+// VPTEST Instructions
+def Zn4VPTESTZ128: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 3;
+ let ResourceCycles = [3];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4VPTESTZ128], (instregex
+ "(V?)PTEST(N?)(MB|MD|MQ|MW)(Z128?)(rrk)"
+ )>;
+
+def Zn4VPTESTZ256: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 4;
+ let ResourceCycles = [4];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4VPTESTZ256], (instregex
+ "(V?)PTEST(N?)(MB|MD|MQ|MW)(Z256?)(rr|rrk)"
+ )>;
+
+def Zn4VPTESTZ: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 5;
+ let ResourceCycles = [5];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4VPTESTZ], (instregex
+ "(V?)PTEST(N?)(MB|MD|MQ|MW)(Z?)(rrk)"
+ )>;
+
+// CONFLICT Instructions
+def Zn4CONFLICTZ128: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4CONFLICTZ128], (instregex
+ "VPCONFLICT(D|Q)(Z128)(rr|rrk|rrkz)"
+ )>;
+
+def Zn4CONFLICTrr: SchedWriteRes<[Zn4FPFMisc01,Zn4FPFMisc12,Zn4FPFMisc23]> {
+ let Latency = 6;
+ let ResourceCycles = [2,2,2];
+ let NumMicroOps = 4;
+}
+def : InstRW<[Zn4CONFLICTrr], (instregex
+ "VPCONFLICT(D|Q)(Z|Z256)(rr|rrkz)"
+ )>;
+
+// RSQRT Instructions
+def Zn4VRSQRT14PDZ256: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 5;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4VRSQRT14PDZ256], (instregex
+ "VRSQRT14(PD|PS)(Z?|Z128?|Z256?)(r|rr|rk|rrk|rkz|rrkz)"
+ )>;
+
+
+// PERM Instructions
+def Zn4PERMILP: SchedWriteRes<[Zn4FPFMisc123]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4PERMILP], (instregex
+ "VPERMILP(S|D)(Y|Z|Z128|Z256)(rr|rrk|rrkz)"
+ )>;
+
+def Zn4PERMIT2_128: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 3;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4PERMIT2_128], (instregex
+ "VPERM(I2|T2)(PS|PD|W)128(rr|rrk|rrkz)",
+ "VPERM(I2|T2)(B|D|Q)128(rr|rrk|rrkz)"
+ )>;
+
+def Zn4PERMIT2_128rr:SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4PERMIT2_128rr], (instregex
+ "V(P?)COMPRESS(B|W|D|Q|PD|PS|SD|SQ)Z128(rr|rrk|rrkz)",
+ "VPERM(B|D|Q|W)(Z128?)(rr|rrk|rrkz)"
+ )>;
+
+def Zn4PERMIT2_256: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 4;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4PERMIT2_256], (instregex
+ "VPERM(I2|T2)(PS|PD|W)256(rr|rrk|rrkz)",
+ "VPERMP(S|D)Z256(rr|rrk|rrkz)",
+ "V(P?)COMPRESS(B|W|D|Q|PD|PS|SD|SQ)Z256(rr|rrk|rrkz)",
+ "VPERM(B|D|Q|W)Z256(rr|rrk|rrkz)",
+ "VPERM(I2|Q|T2)(B|D|Q)(Z?)256(rr|rrk|rrkz)",
+ "VPEXPAND(B|W)Z256(rr|rrk|rrkz)"
+ )>;
+
+def Zn4PERMIT2Z: SchedWriteRes<[Zn4FPFMisc12]> {
+ let Latency = 5;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4PERMIT2Z], (instregex
+ "VPERM(I2|T2)(PS|PD|W)(rr|rrk|rrkz)",
+ "VPERM(B|D|W)Z(rr|rrk|rrkz)",
+ "VPERM(I2|Q|T2)(B|D|Q)(Z?)(rr|rrk|rrkz)",
+ "V(P?)COMPRESS(B|W|D|Q|PD|PS|SD|SQ)Z(rr|rrk|rrkz)",
+ "VPEXPAND(B|W)Z(rr|rrk|rrkz)",
+ "VPERMP(S|D)Z(rr|rrk|rrkz)"
+ )>;
+
+// ALU SLOW Misc Instructions
+def Zn4VecALUZSlow: SchedWriteRes<[Zn4FPFMisc01]> {
+ let Latency = 2;
+ let ResourceCycles = [2];
+ let NumMicroOps = 1;
+}
+def : InstRW<[Zn4VecALUZSlow], (instrs
+ VPABSBZ128rr, VPABSBZ128rrk, VPABSBZ128rrkz, VPABSDZ128rr,
+ VPABSDZ128rrk, VPABSDZ128rrkz, VPABSQZ128rr, VPABSQZ128rrk,
+ VPABSQZ128rrkz, VPABSWZ128rr, VPABSWZ128rrk, VPABSWZ128rrkz,
+ VPADDSBZ128rr, VPADDSBZ128rrk, VPADDSBZ128rrkz, VPADDSWZ128rr,
+ VPADDSWZ128rrk, VPADDSWZ128rrkz,VPADDUSBZ128rr, VPADDUSBZ128rrk,
+ VPADDUSBZ128rrkz, VPADDUSWZ128rr, VPADDUSWZ128rrk, VPADDUSWZ128rrkz,
+ VPAVGBZ128rr, VPAVGBZ128rrk, VPAVGBZ128rrkz, VPAVGWZ128rr,
+ VPAVGWZ128rrk, VPAVGWZ128rrkz, VPOPCNTBZ128rr, VPOPCNTBZ128rrk,
+ VPOPCNTBZ128rrkz, VPOPCNTDZ128rr, VPOPCNTDZ128rrk, VPOPCNTDZ128rrkz,
+ VPOPCNTQZ128rr, VPOPCNTQZ128rrk,VPOPCNTQZ128rrkz, VPOPCNTWZ128rr,
+ VPOPCNTWZ128rrk, VPOPCNTWZ128rrkz,VPSUBSBZ128rr, VPSUBSBZ128rrk,
+ VPSUBSBZ128rrkz, VPSUBSWZ128rr, VPSUBSWZ128rrk, VPSUBSWZ128rrkz,
+ VPSUBUSBZ128rr, VPSUBUSBZ128rrk, VPSUBUSBZ128rrkz,VPSUBUSWZ128rr,
+ VPSUBUSWZ128rrk, VPSUBUSWZ128rrkz
+ )>;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Dependency breaking instructions.
+///////////////////////////////////////////////////////////////////////////////
+
+def Zn4WriteZeroIdiom : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteALU]>
+]>;
+def : InstRW<[Zn4WriteZeroIdiom], (instrs XOR32rr, XOR32rr_REV,
+ XOR64rr, XOR64rr_REV,
+ SUB32rr, SUB32rr_REV,
+ SUB64rr, SUB64rr_REV)>;
+
+def Zn4WriteZeroIdiomEFLAGS : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<CheckSameRegOperand<0, 1>>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteALU]>
+]>;
+def : InstRW<[Zn4WriteZeroIdiomEFLAGS], (instrs CMP8rr, CMP8rr_REV,
+ CMP16rr, CMP16rr_REV,
+ CMP32rr, CMP32rr_REV,
+ CMP64rr, CMP64rr_REV)>;
+
+def Zn4WriteFZeroIdiom : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteFLogic]>
+]>;
+// NOTE: XORPSrr, XORPDrr are not zero-cycle!
+def : InstRW<[Zn4WriteFZeroIdiom], (instrs VXORPSrr, VXORPDrr,
+ VANDNPSrr, VANDNPDrr)>;
+
+def Zn4WriteFZeroIdiomY : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteFLogicY]>
+]>;
+def : InstRW<[Zn4WriteFZeroIdiomY], (instrs VXORPSYrr, VXORPDYrr,
+ VANDNPSYrr, VANDNPDYrr)>;
+
+def Zn4WriteVZeroIdiomLogicX : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteVecLogicX]>
+]>;
+// NOTE: PXORrr,PANDNrr are not zero-cycle!
+def : InstRW<[Zn4WriteVZeroIdiomLogicX], (instrs VPXORrr, VPANDNrr)>;
+
+def Zn4WriteVZeroIdiomLogicY : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteVecLogicY]>
+]>;
+def : InstRW<[Zn4WriteVZeroIdiomLogicY], (instrs VPXORYrr, VPANDNYrr)>;
+
+def Zn4WriteVZeroIdiomALUX : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteVecALUX]>
+]>;
+// NOTE: PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+// PCMPGTBrr, PCMPGTWrr, PCMPGTDrr, PCMPGTQrr are not zero-cycle!
+def : InstRW<[Zn4WriteVZeroIdiomALUX],
+ (instrs VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr)>;
+
+def Zn4WriteVZeroIdiomALUY : SchedWriteVariant<[
+ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>,
+ SchedVar<NoSchedPred, [WriteVecALUY]>
+]>;
+def : InstRW<[Zn4WriteVZeroIdiomALUY],
+ (instrs VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr)>;
+
+def : IsZeroIdiomFunction<[
+ // GPR Zero-idioms.
+ DepBreakingClass<[ XOR32rr, XOR32rr_REV,
+ XOR64rr, XOR64rr_REV,
+ SUB32rr, SUB32rr_REV,
+ SUB64rr, SUB64rr_REV ], ZeroIdiomPredicate>,
+
+ // SSE XMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ XORPSrr, XORPDrr,
+ ANDNPSrr, ANDNPDrr,
+
+ // int variants.
+ PXORrr,
+ PANDNrr,
+ PSUBBrr, PSUBWrr, PSUBDrr, PSUBQrr,
+ PSUBSBrr, PSUBSWrr,
+ PSUBUSBrr, PSUBUSWrr,
+ PCMPGTBrr, PCMPGTWrr, PCMPGTDrr, PCMPGTQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ VXORPSrr, VXORPDrr,
+ VANDNPSrr, VANDNPDrr,
+
+ // int variants.
+ VPXORrr,
+ VPANDNrr,
+ VPSUBBrr, VPSUBWrr, VPSUBDrr, VPSUBQrr,
+ VPSUBSBrr, VPSUBSWrr,
+ VPSUBUSBrr, VPSUBUSWrr,
+ VPCMPGTBrr, VPCMPGTWrr, VPCMPGTDrr, VPCMPGTQrr,
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM Zero-idioms.
+ DepBreakingClass<[
+ // fp variants.
+ VXORPSYrr, VXORPDYrr,
+ VANDNPSYrr, VANDNPDYrr,
+
+ // int variants.
+ VPXORYrr,
+ VPANDNYrr,
+ VPSUBBYrr, VPSUBWYrr, VPSUBDYrr, VPSUBQYrr,
+ VPSUBSBYrr, VPSUBSWYrr,
+ VPSUBUSBYrr, VPSUBUSWYrr,
+ VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
+def : IsDepBreakingFunction<[
+ // GPR
+ DepBreakingClass<[ SBB32rr, SBB32rr_REV,
+ SBB64rr, SBB64rr_REV ], ZeroIdiomPredicate>,
+ DepBreakingClass<[ CMP8rr, CMP8rr_REV,
+ CMP16rr, CMP16rr_REV,
+ CMP32rr, CMP32rr_REV,
+ CMP64rr, CMP64rr_REV ], CheckSameRegOperand<0, 1> >,
+ // SSE
+ DepBreakingClass<[
+ PCMPEQBrr, PCMPEQWrr, PCMPEQDrr, PCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX XMM
+ DepBreakingClass<[
+ VPCMPEQBrr, VPCMPEQWrr, VPCMPEQDrr, VPCMPEQQrr
+ ], ZeroIdiomPredicate>,
+
+ // AVX YMM
+ DepBreakingClass<[
+ VPCMPEQBYrr, VPCMPEQWYrr, VPCMPEQDYrr, VPCMPEQQYrr
+ ], ZeroIdiomPredicate>,
+]>;
+
+} // SchedModel
+
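As a rough illustration of how the folded-load variants above are composed (not part of the patch): each Zn4Write*rm definition takes the reg-reg write's latency and adds the model's load latency via !add, and carries over the micro-op count, sometimes plus an extra load uop. A minimal C++ sketch of that arithmetic, with an assumed load latency rather than Znver4's actual value:

    #include <iostream>

    // Conceptual mirror of the !add(Znver4Model.LoadLatency, ...) pattern above.
    struct WriteRes {
      int Latency;
      int NumMicroOps;
    };

    WriteRes foldLoad(const WriteRes &RegReg, int LoadLatency, int ExtraUOps = 0) {
      return {RegReg.Latency + LoadLatency, RegReg.NumMicroOps + ExtraUOps};
    }

    int main() {
      const int LoadLatency = 8;            // assumed value, not taken from the model
      WriteRes SHA256MSG2rr{3, 4};          // numbers from Zn4WriteSHA256MSG2rr above
      WriteRes SHA256MSG2rm = foldLoad(SHA256MSG2rr, LoadLatency, /*ExtraUOps=*/1);
      std::cout << SHA256MSG2rm.Latency << " cycles, "
                << SHA256MSG2rm.NumMicroOps << " uops\n";
      return 0;
    }
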
diff --git a/contrib/llvm-project/llvm/lib/TargetParser/ARMTargetParser.cpp b/contrib/llvm-project/llvm/lib/TargetParser/ARMTargetParser.cpp
index af98ecb122d6..22b985df9302 100644
--- a/contrib/llvm-project/llvm/lib/TargetParser/ARMTargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/TargetParser/ARMTargetParser.cpp
@@ -523,7 +523,7 @@ StringRef ARM::computeDefaultTargetABI(const Triple &TT, StringRef CPU) {
default:
if (TT.isOSNetBSD())
return "apcs-gnu";
- if (TT.isOSOpenBSD())
+ if (TT.isOSFreeBSD() || TT.isOSOpenBSD())
return "aapcs-linux";
return "aapcs";
}
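The one-line change above puts 32-bit FreeBSD in the same default-ABI bucket as OpenBSD, so those targets now default to "aapcs-linux" instead of plain "aapcs". A simplified sketch of the resulting selection, not the LLVM code itself (the real computeDefaultTargetABI also dispatches on the parsed sub-architecture and CPU before reaching this default case):

    #include <string>

    // Simplified mirror of the default-ABI choice after the patch.
    std::string defaultTargetABI(bool IsNetBSD, bool IsFreeBSD, bool IsOpenBSD) {
      if (IsNetBSD)
        return "apcs-gnu";
      if (IsFreeBSD || IsOpenBSD)   // FreeBSD added by this hunk
        return "aapcs-linux";
      return "aapcs";
    }
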
diff --git a/contrib/llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp b/contrib/llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp
index 89cd5c082d72..933a82b7c6cb 100644
--- a/contrib/llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/contrib/llvm-project/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -14,6 +14,7 @@
#include "llvm/TargetParser/RISCVTargetParser.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TargetParser/Triple.h"
namespace llvm {
namespace RISCV {
@@ -100,5 +101,10 @@ bool getCPUFeaturesExceptStdExt(CPUKind Kind,
return true;
}
+bool isX18ReservedByDefault(const Triple &TT) {
+ // X18 is reserved for the ShadowCallStack ABI (even when not enabled).
+ return TT.isOSFuchsia();
+}
+
} // namespace RISCV
} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
index b9134ce26e80..84013a8909db 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -396,6 +396,18 @@ static bool getPotentialCopiesOfMemoryValue(
NullOnly = false;
};
+ auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
+ Value &V) {
+ Value *AdjV = AA::getWithType(V, *I.getType());
+ if (!AdjV) {
+ LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
+ "cannot be converted to read type: "
+ << *Acc.getRemoteInst() << " : " << *I.getType()
+ << "\n";);
+ }
+ return AdjV;
+ };
+
auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
return true;
@@ -417,7 +429,10 @@ static bool getPotentialCopiesOfMemoryValue(
if (IsLoad) {
assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
if (!Acc.isWrittenValueUnknown()) {
- NewCopies.push_back(Acc.getWrittenValue());
+ Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
+ if (!V)
+ return false;
+ NewCopies.push_back(V);
NewCopyOrigins.push_back(Acc.getRemoteInst());
return true;
}
@@ -428,7 +443,10 @@ static bool getPotentialCopiesOfMemoryValue(
<< *Acc.getRemoteInst() << "\n";);
return false;
}
- NewCopies.push_back(SI->getValueOperand());
+ Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
+ if (!V)
+ return false;
+ NewCopies.push_back(V);
NewCopyOrigins.push_back(SI);
} else {
assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 001ef55ba472..42158e4e05dd 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -1043,12 +1043,14 @@ struct AAPointerInfoImpl
const auto &NoSyncAA = A.getAAFor<AANoSync>(
QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
- IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
+ IRPosition::function(Scope), &QueryingAA, DepClassTy::NONE);
bool AllInSameNoSyncFn = NoSyncAA.isAssumedNoSync();
bool InstIsExecutedByInitialThreadOnly =
ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I);
bool InstIsExecutedInAlignedRegion =
ExecDomainAA && ExecDomainAA->isExecutedInAlignedRegion(A, I);
+ if (InstIsExecutedInAlignedRegion || InstIsExecutedByInitialThreadOnly)
+ A.recordDependence(*ExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
InformationCache &InfoCache = A.getInfoCache();
bool IsThreadLocalObj =
@@ -1063,14 +1065,24 @@ struct AAPointerInfoImpl
auto CanIgnoreThreadingForInst = [&](const Instruction &I) -> bool {
if (IsThreadLocalObj || AllInSameNoSyncFn)
return true;
- if (!ExecDomainAA)
+ const auto *FnExecDomainAA =
+ I.getFunction() == &Scope
+ ? ExecDomainAA
+ : A.lookupAAFor<AAExecutionDomain>(
+ IRPosition::function(*I.getFunction()), &QueryingAA,
+ DepClassTy::NONE);
+ if (!FnExecDomainAA)
return false;
if (InstIsExecutedInAlignedRegion ||
- ExecDomainAA->isExecutedInAlignedRegion(A, I))
+ FnExecDomainAA->isExecutedInAlignedRegion(A, I)) {
+ A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
return true;
+ }
if (InstIsExecutedByInitialThreadOnly &&
- ExecDomainAA->isExecutedByInitialThreadOnly(I))
+ FnExecDomainAA->isExecutedByInitialThreadOnly(I)) {
+ A.recordDependence(*FnExecDomainAA, QueryingAA, DepClassTy::OPTIONAL);
return true;
+ }
return false;
};
@@ -4161,12 +4173,14 @@ struct AAIsDeadFloating : public AAIsDeadValueImpl {
return true;
if (auto *LI = dyn_cast<LoadInst>(V)) {
if (llvm::all_of(LI->uses(), [&](const Use &U) {
- return InfoCache.isOnlyUsedByAssume(
- cast<Instruction>(*U.getUser())) ||
- A.isAssumedDead(U, this, nullptr, UsedAssumedInformation);
+ auto &UserI = cast<Instruction>(*U.getUser());
+ if (InfoCache.isOnlyUsedByAssume(UserI)) {
+ if (AssumeOnlyInst)
+ AssumeOnlyInst->insert(&UserI);
+ return true;
+ }
+ return A.isAssumedDead(U, this, nullptr, UsedAssumedInformation);
})) {
- if (AssumeOnlyInst)
- AssumeOnlyInst->insert(LI);
return true;
}
}
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index bee154dab10f..eb499a1aa912 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -188,9 +188,9 @@ struct AAICVTracker;
struct OMPInformationCache : public InformationCache {
OMPInformationCache(Module &M, AnalysisGetter &AG,
BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC,
- KernelSet &Kernels)
+ KernelSet &Kernels, bool OpenMPPostLink)
: InformationCache(M, AG, Allocator, CGSCC), OMPBuilder(M),
- Kernels(Kernels) {
+ Kernels(Kernels), OpenMPPostLink(OpenMPPostLink) {
OMPBuilder.initialize();
initializeRuntimeFunctions(M);
@@ -448,6 +448,24 @@ struct OMPInformationCache : public InformationCache {
CI->setCallingConv(Fn->getCallingConv());
}
+ // Helper function to determine if it's legal to create a call to the runtime
+ // functions.
+ bool runtimeFnsAvailable(ArrayRef<RuntimeFunction> Fns) {
+ // We can always emit calls if we haven't yet linked in the runtime.
+ if (!OpenMPPostLink)
+ return true;
+
+ // Once the runtime has already been linked in, we cannot emit calls to
+ // any undefined functions.
+ for (RuntimeFunction Fn : Fns) {
+ RuntimeFunctionInfo &RFI = RFIs[Fn];
+
+ if (RFI.Declaration && RFI.Declaration->isDeclaration())
+ return false;
+ }
+ return true;
+ }
+
/// Helper to initialize all runtime function information for those defined
/// in OpenMPKinds.def.
void initializeRuntimeFunctions(Module &M) {
@@ -523,6 +541,9 @@ struct OMPInformationCache : public InformationCache {
/// Collection of known OpenMP runtime functions..
DenseSet<const Function *> RTLFunctions;
+
+ /// Indicates if we have already linked in the OpenMP device library.
+ bool OpenMPPostLink = false;
};
template <typename Ty, bool InsertInvalidates = true>
@@ -1412,7 +1433,10 @@ private:
Changed |= WasSplit;
return WasSplit;
};
- RFI.foreachUse(SCC, SplitMemTransfers);
+ if (OMPInfoCache.runtimeFnsAvailable(
+ {OMPRTL___tgt_target_data_begin_mapper_issue,
+ OMPRTL___tgt_target_data_begin_mapper_wait}))
+ RFI.foreachUse(SCC, SplitMemTransfers);
return Changed;
}
@@ -2656,7 +2680,9 @@ struct AAExecutionDomainFunction : public AAExecutionDomain {
bool isExecutedInAlignedRegion(Attributor &A,
const Instruction &I) const override {
- if (!isValidState() || isa<CallBase>(I))
+ assert(I.getFunction() == getAnchorScope() &&
+ "Instruction is out of scope!");
+ if (!isValidState())
return false;
const Instruction *CurI;
@@ -2667,14 +2693,18 @@ struct AAExecutionDomainFunction : public AAExecutionDomain {
auto *CB = dyn_cast<CallBase>(CurI);
if (!CB)
continue;
+ if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB))) {
+ break;
+ }
const auto &It = CEDMap.find(CB);
if (It == CEDMap.end())
continue;
- if (!It->getSecond().IsReachedFromAlignedBarrierOnly)
+ if (!It->getSecond().IsReachingAlignedBarrierOnly)
return false;
+ break;
} while ((CurI = CurI->getNextNonDebugInstruction()));
- if (!CurI && !BEDMap.lookup(I.getParent()).IsReachedFromAlignedBarrierOnly)
+ if (!CurI && !BEDMap.lookup(I.getParent()).IsReachingAlignedBarrierOnly)
return false;
// Check backward until a call or the block beginning is reached.
@@ -2683,12 +2713,16 @@ struct AAExecutionDomainFunction : public AAExecutionDomain {
auto *CB = dyn_cast<CallBase>(CurI);
if (!CB)
continue;
+ if (CB != &I && AlignedBarriers.contains(const_cast<CallBase *>(CB))) {
+ break;
+ }
const auto &It = CEDMap.find(CB);
if (It == CEDMap.end())
continue;
if (!AA::isNoSyncInst(A, *CB, *this)) {
- if (It->getSecond().IsReachedFromAlignedBarrierOnly)
+ if (It->getSecond().IsReachedFromAlignedBarrierOnly) {
break;
+ }
return false;
}
@@ -2984,7 +3018,8 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
if (EDAA.getState().isValidState()) {
const auto &CalleeED = EDAA.getFunctionExecutionDomain();
ED.IsReachedFromAlignedBarrierOnly =
- CalleeED.IsReachedFromAlignedBarrierOnly;
+ CallED.IsReachedFromAlignedBarrierOnly =
+ CalleeED.IsReachedFromAlignedBarrierOnly;
AlignedBarrierLastInBlock = ED.IsReachedFromAlignedBarrierOnly;
if (IsNoSync || !CalleeED.IsReachedFromAlignedBarrierOnly)
ED.EncounteredNonLocalSideEffect |=
@@ -2999,8 +3034,9 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
continue;
}
}
- ED.IsReachedFromAlignedBarrierOnly =
- IsNoSync && ED.IsReachedFromAlignedBarrierOnly;
+ if (!IsNoSync)
+ ED.IsReachedFromAlignedBarrierOnly =
+ CallED.IsReachedFromAlignedBarrierOnly = false;
AlignedBarrierLastInBlock &= ED.IsReachedFromAlignedBarrierOnly;
ED.EncounteredNonLocalSideEffect |= !CB->doesNotAccessMemory();
if (!IsNoSync)
@@ -3914,6 +3950,12 @@ struct AAKernelInfoFunction : AAKernelInfo {
bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
+ // We cannot change to SPMD mode if the runtime functions aren't available.
+ if (!OMPInfoCache.runtimeFnsAvailable(
+ {OMPRTL___kmpc_get_hardware_thread_id_in_block,
+ OMPRTL___kmpc_barrier_simple_spmd}))
+ return false;
+
if (!SPMDCompatibilityTracker.isAssumed()) {
for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
if (!NonCompatibleI)
@@ -4021,6 +4063,13 @@ struct AAKernelInfoFunction : AAKernelInfo {
if (!ReachedKnownParallelRegions.isValidState())
return ChangeStatus::UNCHANGED;
+ auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
+ if (!OMPInfoCache.runtimeFnsAvailable(
+ {OMPRTL___kmpc_get_hardware_num_threads_in_block,
+ OMPRTL___kmpc_get_warp_size, OMPRTL___kmpc_barrier_simple_generic,
+ OMPRTL___kmpc_kernel_parallel, OMPRTL___kmpc_kernel_end_parallel}))
+ return ChangeStatus::UNCHANGED;
+
const int InitModeArgNo = 1;
const int InitUseStateMachineArgNo = 2;
@@ -4167,7 +4216,6 @@ struct AAKernelInfoFunction : AAKernelInfo {
BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB);
Module &M = *Kernel->getParent();
- auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
FunctionCallee BlockHwSizeFn =
OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
M, OMPRTL___kmpc_get_hardware_num_threads_in_block);
@@ -5343,7 +5391,10 @@ PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
BumpPtrAllocator Allocator;
CallGraphUpdater CGUpdater;
- OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ nullptr, Kernels);
+ bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink ||
+ LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink;
+ OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ nullptr, Kernels,
+ PostLink);
unsigned MaxFixpointIterations =
(isOpenMPDevice(M)) ? SetFixpointIterations : 32;
@@ -5417,9 +5468,11 @@ PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
CallGraphUpdater CGUpdater;
CGUpdater.initialize(CG, C, AM, UR);
+ bool PostLink = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink ||
+ LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink;
SetVector<Function *> Functions(SCC.begin(), SCC.end());
OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
- /*CGSCC*/ &Functions, Kernels);
+ /*CGSCC*/ &Functions, Kernels, PostLink);
unsigned MaxFixpointIterations =
(isOpenMPDevice(M)) ? SetFixpointIterations : 32;
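The new runtimeFnsAvailable helper gates these transformations on whether the needed OpenMP runtime entry points can still be resolved: before the device runtime is linked, any call may be emitted; after linking, a function that is only a declaration has no body to bind to, so the optimization backs off. A stand-alone sketch of that predicate in plain C++ (hypothetical struct, not the Attributor data structures):

    #include <vector>

    // Stand-in for a runtime function entry in the info cache.
    struct RuntimeFnInfo {
      bool HasDeclaration;         // the module references the function
      bool IsDeclarationOnly;      // declaration without a definition
    };

    // Mirrors the logic of OMPInformationCache::runtimeFnsAvailable above.
    bool runtimeFnsAvailable(const std::vector<RuntimeFnInfo> &Fns, bool PostLink) {
      if (!PostLink)
        return true;               // pre-link: the runtime can still supply bodies
      for (const RuntimeFnInfo &Fn : Fns)
        if (Fn.HasDeclaration && Fn.IsDeclarationOnly)
          return false;            // post-link and still undefined: cannot emit a call
      return true;
    }
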
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 1480a0ff9e2f..de3095852048 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3184,16 +3184,6 @@ Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
}
break;
}
- case Instruction::And: {
- const APInt *BOC;
- if (match(BOp1, m_APInt(BOC))) {
- // If we have ((X & C) == C), turn it into ((X & C) != 0).
- if (C == *BOC && C.isPowerOf2())
- return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
- BO, Constant::getNullValue(RHS->getType()));
- }
- break;
- }
case Instruction::UDiv:
if (C.isZero()) {
// (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
@@ -5653,6 +5643,12 @@ Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
}
}
}
+
+ // Op0 eq C_Pow2 -> Op0 ne 0 if Op0 is known to be C_Pow2 or zero.
+ if (Op1Known.isConstant() && Op1Known.getConstant().isPowerOf2() &&
+ (Op0Known & Op1Known) == Op0Known)
+ return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
+ ConstantInt::getNullValue(Op1->getType()));
break;
}
case ICmpInst::ICMP_ULT: {
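The removed And-specific fold and the new known-bits fold express the same identity: if C is a power of two and the value is known to be either 0 or C, then comparing against C is equivalent to testing for non-zero. A tiny brute-force check of that equivalence over all 8-bit values, purely illustrative and independent of the LLVM code:

    #include <cassert>

    int main() {
      for (unsigned c = 1; c < 256; c <<= 1) {      // every 8-bit power-of-two C
        for (unsigned x = 0; x < 256; ++x) {
          // (X & C) == C  <=>  (X & C) != 0 when C is a power of two.
          assert(((x & c) == c) == ((x & c) != 0));
          // More generally: if X is known to be either 0 or C, X == C <=> X != 0.
          if (x == 0 || x == c)
            assert((x == c) == (x != 0));
        }
      }
      return 0;
    }
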
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
index 31cdd2ee56b9..b2ed95b05e04 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Utils/Local.cpp
@@ -2930,7 +2930,8 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
return;
unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
- if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
+ if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
+ !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
}
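The width check added to copyRangeMetadata keeps the nonnull inference conservative: a !range that excludes zero at one bit width says nothing once the value is reinterpreted at a different width. A small demonstration of the pitfall (illustrative only):

    #include <cstdint>
    #include <iostream>

    int main() {
      // A 64-bit value whose documented range excludes zero...
      uint64_t Wide = 0x100000000ULL;                 // non-zero as a 64-bit integer
      // ...is not necessarily non-zero once reinterpreted at 32 bits.
      uint32_t Narrow = static_cast<uint32_t>(Wide);
      std::cout << "wide non-zero: " << (Wide != 0)
                << ", narrow non-zero: " << (Narrow != 0) << "\n";  // prints 1, 0
      return 0;
    }
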
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index bb8544356c6d..0bd519a6d945 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5959,7 +5959,7 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
// Saves the list of values that are used in the loop but are defined outside
// the loop (not including non-instruction values such as arguments and
// constants).
- SmallPtrSet<Value *, 8> LoopInvariants;
+ SmallPtrSet<Instruction *, 8> LoopInvariants;
for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
for (Instruction &I : BB->instructionsWithoutDebug()) {
@@ -6085,11 +6085,16 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
for (auto *Inst : LoopInvariants) {
// FIXME: The target might use more than one register for the type
// even in the scalar case.
- unsigned Usage =
- VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
+ bool IsScalar = all_of(Inst->users(), [&](User *U) {
+ auto *I = cast<Instruction>(U);
+ return TheLoop != LI->getLoopFor(I->getParent()) ||
+ isScalarAfterVectorization(I, VFs[i]);
+ });
+
+ ElementCount VF = IsScalar ? ElementCount::getFixed(1) : VFs[i];
unsigned ClassID =
- TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
- Invariant[ClassID] += Usage;
+ TTI.getRegisterClassForType(VF.isVector(), Inst->getType());
+ Invariant[ClassID] += GetRegUsage(Inst->getType(), VF);
}
LLVM_DEBUG({
diff --git a/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp b/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
index 2b2eda5d8587..7b71d5ad4554 100644
--- a/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-cov/CodeCoverage.cpp
@@ -23,6 +23,10 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/Debuginfod/BuildIDFetcher.h"
+#include "llvm/Debuginfod/Debuginfod.h"
+#include "llvm/Debuginfod/HTTPClient.h"
+#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CommandLine.h"
@@ -179,6 +183,8 @@ private:
/// Allowlist from -name-allowlist to be used for filtering.
std::unique_ptr<SpecialCaseList> NameAllowlist;
+
+ std::unique_ptr<object::BuildIDFetcher> BIDFetcher;
};
}
@@ -435,7 +441,7 @@ std::unique_ptr<CoverageMapping> CodeCoverageTool::load() {
ObjectFilename);
auto CoverageOrErr =
CoverageMapping::load(ObjectFilenames, PGOFilename, CoverageArches,
- ViewOpts.CompilationDirectory);
+ ViewOpts.CompilationDirectory, BIDFetcher.get());
if (Error E = CoverageOrErr.takeError()) {
error("Failed to load coverage: " + toString(std::move(E)));
return nullptr;
@@ -629,7 +635,7 @@ int CodeCoverageTool::run(Command Cmd, int argc, const char **argv) {
"dump-collected-objects", cl::Optional, cl::Hidden,
cl::desc("Show the collected coverage object files"));
- cl::list<std::string> InputSourceFiles(cl::Positional,
+ cl::list<std::string> InputSourceFiles("sources", cl::Positional,
cl::desc("<Source files>"));
cl::opt<bool> DebugDumpCollectedPaths(
@@ -647,6 +653,14 @@ int CodeCoverageTool::run(Command Cmd, int argc, const char **argv) {
cl::opt<bool> DebugDump("dump", cl::Optional,
cl::desc("Show internal debug dump"));
+ cl::list<std::string> DebugFileDirectory(
+ "debug-file-directory",
+ cl::desc("Directories to search for object files by build ID"));
+ cl::opt<bool> Debuginfod(
+ "debuginfod", cl::ZeroOrMore,
+ cl::desc("Use debuginfod to look up object files from profile"),
+ cl::init(canUseDebuginfod()));
+
cl::opt<CoverageViewOptions::OutputFormat> Format(
"format", cl::desc("Output format for line-based coverage reports"),
cl::values(clEnumValN(CoverageViewOptions::OutputFormat::Text, "text",
@@ -749,12 +763,18 @@ int CodeCoverageTool::run(Command Cmd, int argc, const char **argv) {
auto commandLineParser = [&, this](int argc, const char **argv) -> int {
cl::ParseCommandLineOptions(argc, argv, "LLVM code coverage tool\n");
ViewOpts.Debug = DebugDump;
+ if (Debuginfod) {
+ HTTPClient::initialize();
+ BIDFetcher = std::make_unique<DebuginfodFetcher>(DebugFileDirectory);
+ } else {
+ BIDFetcher = std::make_unique<object::BuildIDFetcher>(DebugFileDirectory);
+ }
if (!CovFilename.empty())
ObjectFilenames.emplace_back(CovFilename);
for (const std::string &Filename : CovFilenames)
ObjectFilenames.emplace_back(Filename);
- if (ObjectFilenames.empty()) {
+ if (ObjectFilenames.empty() && !Debuginfod && DebugFileDirectory.empty()) {
errs() << "No filenames specified!\n";
::exit(1);
}
@@ -867,10 +887,8 @@ int CodeCoverageTool::run(Command Cmd, int argc, const char **argv) {
}
CoverageArches.emplace_back(Arch);
}
- if (CoverageArches.size() == 1)
- CoverageArches.insert(CoverageArches.end(), ObjectFilenames.size() - 1,
- CoverageArches[0]);
- if (CoverageArches.size() != ObjectFilenames.size()) {
+ if (CoverageArches.size() != 1 &&
+ CoverageArches.size() != ObjectFilenames.size()) {
error("Number of architectures doesn't match the number of objects");
return 1;
}
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/ObjdumpOpts.td b/contrib/llvm-project/llvm/tools/llvm-objdump/ObjdumpOpts.td
index de7f883d24a8..c6627c75157b 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/ObjdumpOpts.td
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/ObjdumpOpts.td
@@ -145,10 +145,10 @@ def reloc : Flag<["--"], "reloc">,
def : Flag<["-"], "r">, Alias<reloc>, HelpText<"Alias for --reloc">;
def print_imm_hex : Flag<["--"], "print-imm-hex">,
- HelpText<"Use hex format for immediate values">;
+ HelpText<"Use hex format for immediate values (default)">;
def no_print_imm_hex : Flag<["--"], "no-print-imm-hex">,
- HelpText<"Do not use hex format for immediate values (default)">;
+ HelpText<"Do not use hex format for immediate values">;
def : Flag<["--"], "print-imm-hex=false">, Alias<no_print_imm_hex>;
def private_headers : Flag<["--"], "private-headers">,
diff --git a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
index 930b132533cd..9979a26cf115 100644
--- a/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -3198,9 +3198,7 @@ int main(int argc, char **argv) {
// Initialize debuginfod.
const bool ShouldUseDebuginfodByDefault =
- InputArgs.hasArg(OBJDUMP_build_id) ||
- (HTTPClient::isAvailable() &&
- !ExitOnErr(getDefaultDebuginfodUrls()).empty());
+ InputArgs.hasArg(OBJDUMP_build_id) || canUseDebuginfod();
std::vector<std::string> DebugFileDirectories =
InputArgs.getAllArgValues(OBJDUMP_debug_file_directory);
if (InputArgs.hasFlag(OBJDUMP_debuginfod, OBJDUMP_no_debuginfod,
diff --git a/contrib/llvm-project/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp b/contrib/llvm-project/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
index 1b86134dda51..ed24e8550291 100644
--- a/contrib/llvm-project/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
+++ b/contrib/llvm-project/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp
@@ -443,13 +443,7 @@ int main(int argc, char **argv) {
LLVMSymbolizer Symbolizer(Opts);
- // A debuginfod lookup could succeed if a HTTP client is available and at
- // least one backing URL is configured.
- bool ShouldUseDebuginfodByDefault =
- HTTPClient::isAvailable() &&
- !ExitOnErr(getDefaultDebuginfodUrls()).empty();
- if (Args.hasFlag(OPT_debuginfod, OPT_no_debuginfod,
- ShouldUseDebuginfodByDefault))
+ if (Args.hasFlag(OPT_debuginfod, OPT_no_debuginfod, canUseDebuginfod()))
enableDebuginfod(Symbolizer, Args);
if (Args.hasArg(OPT_filter_markup)) {
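
Both llvm-objdump (above) and llvm-symbolizer drop their inline "HTTP client available and at least one backing URL configured" test in favour of the shared canUseDebuginfod() helper. The sketch below restates the condition the removed lines computed, using the Debuginfod APIs visible in this diff; it illustrates the predicate and is not the helper's actual source.

    // Sketch: the default-on condition the removed code open-coded.
    #include "llvm/Debuginfod/Debuginfod.h"
    #include "llvm/Debuginfod/HTTPClient.h"
    #include "llvm/Support/Error.h"

    static bool shouldDefaultToDebuginfod() {
      if (!llvm::HTTPClient::isAvailable())   // no HTTP support in this build
        return false;
      // At least one server must be configured (e.g. via DEBUGINFOD_URLS).
      auto Urls = llvm::getDefaultDebuginfodUrls();
      if (!Urls) {
        llvm::consumeError(Urls.takeError());
        return false;
      }
      return !Urls->empty();
    }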
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_os.h b/contrib/llvm-project/openmp/runtime/src/kmp_os.h
index 1b07dca9031d..c78f3eeaf7cd 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_os.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_os.h
@@ -610,7 +610,7 @@ inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
#define KMP_XCHG_FIXED8(p, v) \
_InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
-#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v)));
+#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
kmp_int64 tmp = _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64
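
The KMP_XCHG_REAL64 change removes a stray closing parenthesis: the old macro expanded to __kmp_xchg_real64((p), (v))); at every use site on this _Interlocked-based branch, leaving an unmatched ')' behind the statement. A trivial before/after illustration with made-up macro names:

    // Illustration only; neither macro exists in the runtime.
    #define BROKEN_XCHG(p, v) do_xchg((p), (v)));  // expands to "...));", an
                                                   // extra ')' after the call
    #define FIXED_XCHG(p, v) do_xchg((p), (v));    // balanced expansion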
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
index 7393dba90b87..88b9e58fca54 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_runtime.cpp
@@ -24,7 +24,6 @@
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_dispatch.h"
-#include <cstdio>
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
@@ -6917,12 +6916,11 @@ void __kmp_unregister_library(void) {
// File did not open. Try the temporary file.
use_shm = false;
KMP_DEBUG_ASSERT(temp_reg_status_file_name);
- FILE *tf = fopen(temp_reg_status_file_name, O_RDONLY);
- if (!tf) {
+ fd1 = open(temp_reg_status_file_name, O_RDONLY);
+ if (fd1 == -1) {
// give it up now.
return;
}
- fd1 = fileno(tf);
}
char *data1 = (char *)mmap(0, SHM_SIZE, PROT_READ, MAP_SHARED, fd1, 0);
if (data1 != MAP_FAILED) {
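
The fallback path in __kmp_unregister_library mixed two I/O APIs: it passed the POSIX flag O_RDONLY to fopen(), which expects a mode string such as "r", and then recovered a descriptor with fileno() just to hand it to mmap(). The fix opens the temporary registration file directly with open(2), and the first hunk drops the now-unneeded <cstdio> include. A self-contained sketch of the corrected open-then-mmap pattern, with an illustrative helper name:

    // Sketch: map a file read-only; path and size are caller-supplied.
    #include <cstddef>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    void *map_readonly(const char *path, size_t size) {
      int fd = open(path, O_RDONLY);   // not fopen(path, O_RDONLY): fopen takes
      if (fd == -1)                    // a mode string, not an int flag
        return nullptr;
      void *data = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
      close(fd);                       // the mapping stays valid after close(2)
      return data == MAP_FAILED ? nullptr : data;
    }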
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_safe_c_api.h b/contrib/llvm-project/openmp/runtime/src/kmp_safe_c_api.h
index 3db1ada37b07..72f26fd9897d 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_safe_c_api.h
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_safe_c_api.h
@@ -30,6 +30,7 @@
#define KMP_SSCANF sscanf_s
#define KMP_STRCPY_S strcpy_s
#define KMP_STRNCPY_S strncpy_s
+#define KMP_STRNCAT_S strncat_s
// Use this only when buffer size is unknown
#define KMP_MEMCPY(dst, src, cnt) memcpy_s(dst, cnt, src, cnt)
@@ -61,6 +62,7 @@ template <typename T> struct kmp_get_rmax_t<T, true> {
#define KMP_SSCANF sscanf
#define KMP_STRCPY_S(dst, bsz, src) strcpy(dst, src)
#define KMP_STRNCPY_S(dst, bsz, src, cnt) strncpy(dst, src, cnt)
+#define KMP_STRNCAT_S(dst, bsz, src, cnt) strncat(dst, src, cnt)
#define KMP_VSNPRINTF vsnprintf
#define KMP_STRNCPY strncpy
#define KMP_STRLEN strlen
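
KMP_STRNCAT_S joins the existing string wrappers: under the Microsoft CRT it maps to the bounds-checked strncat_s(dst, bsz, src, cnt), while the portable branch falls back to plain strncat and simply ignores the buffer-size argument. A minimal usage sketch; the preprocessor guard is simplified to _WIN32 here and is not the header's actual condition:

    // Sketch of the wrapper's calling convention:
    //   KMP_STRNCAT_S(dst, dst_size, src, count) appends at most `count` chars.
    #include <cstring>

    #ifdef _WIN32
    #define KMP_STRNCAT_S strncat_s
    #else
    #define KMP_STRNCAT_S(dst, bsz, src, cnt) strncat(dst, src, cnt)
    #endif

    void demo() {
      char buf[16] = "abc";
      KMP_STRNCAT_S(buf, sizeof(buf), "defgh", 2);  // buf is now "abcde"
    }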
diff --git a/contrib/llvm-project/openmp/runtime/src/kmp_str.cpp b/contrib/llvm-project/openmp/runtime/src/kmp_str.cpp
index e64f989fbc69..4cba56964a09 100644
--- a/contrib/llvm-project/openmp/runtime/src/kmp_str.cpp
+++ b/contrib/llvm-project/openmp/runtime/src/kmp_str.cpp
@@ -137,8 +137,8 @@ void __kmp_str_buf_cat(kmp_str_buf_t *buffer, char const *str, size_t len) {
KMP_DEBUG_ASSERT(len >= 0);
__kmp_str_buf_reserve(buffer, buffer->used + len + 1);
- KMP_MEMCPY(buffer->str + buffer->used, str, len);
- buffer->str[buffer->used + len] = 0;
+ buffer->str[buffer->used] = '\0';
+ KMP_STRNCAT_S(buffer->str + buffer->used, len + 1, str, len);
__kmp_type_convert(buffer->used + len, &(buffer->used));
KMP_STR_BUF_INVARIANT(buffer);
} // __kmp_str_buf_cat
@@ -151,8 +151,8 @@ void __kmp_str_buf_catbuf(kmp_str_buf_t *dest, const kmp_str_buf_t *src) {
if (!src->str || !src->used)
return;
__kmp_str_buf_reserve(dest, dest->used + src->used + 1);
- KMP_MEMCPY(dest->str + dest->used, src->str, src->used);
- dest->str[dest->used + src->used] = 0;
+ dest->str[dest->used] = '\0';
+ KMP_STRNCAT_S(dest->str + dest->used, src->used + 1, src->str, src->used);
dest->used += src->used;
KMP_STR_BUF_INVARIANT(dest);
} // __kmp_str_buf_catbuf
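
Both append helpers in kmp_str.cpp switch from KMP_MEMCPY plus manual termination to KMP_STRNCAT_S: the byte at the append position is set to '\0' first, so the concatenation starts from a valid empty string at the tail, and strncat(_s) writes the trailing NUL itself before `used` is advanced. A condensed sketch of the pattern, assuming the caller has already reserved used + len + 1 bytes; the buffer type and names are illustrative:

    // Sketch: append `len` chars of `str` to a counted buffer, strncat-style.
    #include <cstddef>
    #include <cstring>

    struct str_buf {
      char *str;       // backing storage, capacity >= used + len + 1
      size_t used;     // bytes currently in use, not counting the final NUL
    };

    void buf_cat(str_buf *b, const char *str, size_t len) {
      b->str[b->used] = '\0';               // make the tail a valid C string
      strncat(b->str + b->used, str, len);  // appends <= len chars, then NUL
      b->used += len;
    }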
diff --git a/contrib/llvm-project/openmp/runtime/src/z_Linux_asm.S b/contrib/llvm-project/openmp/runtime/src/z_Linux_asm.S
index 557f3784b3c4..8aa48d10c3af 100644
--- a/contrib/llvm-project/openmp/runtime/src/z_Linux_asm.S
+++ b/contrib/llvm-project/openmp/runtime/src/z_Linux_asm.S
@@ -1426,7 +1426,10 @@ __tid = 8
// for when we call pkfn below
push {r3-r11,lr}
// Load p_argv and &exit_frame
- ldrd r4, r5, [sp, #10*4]
+ ldr r4, [sp, #10*4]
+# if OMPT_SUPPORT
+ ldr r5, [sp, #11*4]
+# endif
# if KMP_OS_DARWIN || (defined(__thumb__) && !KMP_OS_WINDOWS)
# define FP r7
diff --git a/lib/clang/include/VCSVersion.inc b/lib/clang/include/VCSVersion.inc
index 5a44e0b5022a..17a4e9fbc6bc 100644
--- a/lib/clang/include/VCSVersion.inc
+++ b/lib/clang/include/VCSVersion.inc
@@ -1,10 +1,10 @@
// $FreeBSD$
-#define LLVM_REVISION "llvmorg-16-init-18548-gb0daacf58f41"
+#define LLVM_REVISION "llvmorg-16.0.1-0-gcd89023f7979"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"
-#define CLANG_REVISION "llvmorg-16-init-18548-gb0daacf58f41"
+#define CLANG_REVISION "llvmorg-16.0.1-0-gcd89023f7979"
#define CLANG_REPOSITORY "https://github.com/llvm/llvm-project.git"
-#define LLDB_REVISION "llvmorg-16-init-18548-gb0daacf58f41"
+#define LLDB_REVISION "llvmorg-16.0.1-0-gcd89023f7979"
#define LLDB_REPOSITORY "https://github.com/llvm/llvm-project.git"
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index 441152e63123..7b3afbcccb01 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -1,10 +1,10 @@
/* $FreeBSD$ */
-#define CLANG_VERSION 16.0.0
-#define CLANG_VERSION_STRING "16.0.0"
+#define CLANG_VERSION 16.0.1
+#define CLANG_VERSION_STRING "16.0.1"
#define CLANG_VERSION_MAJOR 16
#define CLANG_VERSION_MAJOR_STRING "16"
#define CLANG_VERSION_MINOR 0
-#define CLANG_VERSION_PATCHLEVEL 0
+#define CLANG_VERSION_PATCHLEVEL 1
#define CLANG_VENDOR "FreeBSD "
diff --git a/lib/clang/include/lld/Common/Version.inc b/lib/clang/include/lld/Common/Version.inc
index a68bf58c1b14..b5cd91c34032 100644
--- a/lib/clang/include/lld/Common/Version.inc
+++ b/lib/clang/include/lld/Common/Version.inc
@@ -1,4 +1,4 @@
// Local identifier in __FreeBSD_version style
#define LLD_FREEBSD_VERSION 1400006
-#define LLD_VERSION_STRING "16.0.0 (FreeBSD llvmorg-16-init-18548-gb0daacf58f41-" __XSTRING(LLD_FREEBSD_VERSION) ")"
+#define LLD_VERSION_STRING "16.0.1 (FreeBSD llvmorg-16.0.1-0-gcd89023f7979-" __XSTRING(LLD_FREEBSD_VERSION) ")"
diff --git a/lib/clang/include/lldb/Version/Version.inc b/lib/clang/include/lldb/Version/Version.inc
index 97d5d5c781f5..7060f0d93cbc 100644
--- a/lib/clang/include/lldb/Version/Version.inc
+++ b/lib/clang/include/lldb/Version/Version.inc
@@ -1,6 +1,6 @@
-#define LLDB_VERSION 16.0.0git
-#define LLDB_VERSION_STRING "16.0.0git"
+#define LLDB_VERSION 16.0.1
+#define LLDB_VERSION_STRING "16.0.1"
#define LLDB_VERSION_MAJOR 16
#define LLDB_VERSION_MINOR 0
-#define LLDB_VERSION_PATCH 0
+#define LLDB_VERSION_PATCH 1
/* #undef LLDB_FULL_VERSION_STRING */
diff --git a/lib/clang/include/llvm/Config/config.h b/lib/clang/include/llvm/Config/config.h
index 4f156ca5669e..46ac23cf47a1 100644
--- a/lib/clang/include/llvm/Config/config.h
+++ b/lib/clang/include/llvm/Config/config.h
@@ -139,11 +139,6 @@
#define HAVE_LINK_H 0
#endif
-/* Define to 1 if you have the `lseek64' function. */
-#if defined(__linux__)
-#define HAVE_LSEEK64 1
-#endif
-
/* Define to 1 if you have the <mach/mach.h> header file. */
#if __has_include(<mach/mach.h>)
#define HAVE_MACH_MACH_H 1
@@ -353,10 +348,10 @@
#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "LLVM 16.0.0git"
+#define PACKAGE_STRING "LLVM 16.0.1"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "16.0.0git"
+#define PACKAGE_VERSION "16.0.1"
/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */
diff --git a/lib/clang/include/llvm/Config/llvm-config.h b/lib/clang/include/llvm/Config/llvm-config.h
index e2f8a396bf24..640003331a8e 100644
--- a/lib/clang/include/llvm/Config/llvm-config.h
+++ b/lib/clang/include/llvm/Config/llvm-config.h
@@ -74,10 +74,10 @@
#define LLVM_VERSION_MINOR 0
/* Patch version of the LLVM API */
-#define LLVM_VERSION_PATCH 0
+#define LLVM_VERSION_PATCH 1
/* LLVM version string */
-#define LLVM_VERSION_STRING "16.0.0git"
+#define LLVM_VERSION_STRING "16.0.1"
/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index 5733d7c225dc..5194bb986dc0 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,3 +1,3 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "llvmorg-16-init-18548-gb0daacf58f41"
+#define LLVM_REVISION "llvmorg-16.0.1-0-gcd89023f7979"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"
diff --git a/usr.bin/clang/llvm-cov/Makefile b/usr.bin/clang/llvm-cov/Makefile
index d5754223ca0e..9c6cafab0561 100644
--- a/usr.bin/clang/llvm-cov/Makefile
+++ b/usr.bin/clang/llvm-cov/Makefile
@@ -21,5 +21,6 @@ SRCS+= gcov.cpp
SRCS+= llvm-cov.cpp
LIBADD+= z
+LIBADD+= zstd
.include "../llvm.prog.mk"