Diffstat (limited to 'contrib')
-rwxr-xr-x [-rw-r--r--]  contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h | 0
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/Attr.td | 2
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td | 26
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/Builtins.def | 26
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/Builtins.h | 7
-rw-r--r--  contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td | 2
-rwxr-xr-x [-rw-r--r--]  contrib/llvm-project/clang/include/clang/Format/Format.h | 0
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h | 0
-rwxr-xr-x [-rw-r--r--]  contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp | 0
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp | 6
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp | 13
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp | 16
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp | 113
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Format/FormatToken.h | 6
-rw-r--r--  contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp | 10
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h | 6
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/cuda_wrappers/new | 8
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h | 2
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp | 5
-rwxr-xr-x [-rw-r--r--]  contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp | 0
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp | 24
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp | 6
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp | 3
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 13
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h | 11
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h | 0
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 0
-rw-r--r--  contrib/llvm-project/lld/COFF/DebugTypes.cpp | 5
-rw-r--r--  contrib/llvm-project/lld/ELF/LinkerScript.cpp | 7
-rw-r--r--  contrib/llvm-project/lldb/include/lldb/Core/ModuleList.h | 19
-rw-r--r--  contrib/llvm-project/lldb/include/lldb/Target/Platform.h | 9
-rw-r--r--  contrib/llvm-project/lldb/source/Core/ModuleList.cpp | 39
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp | 152
-rw-r--r--  contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.h | 4
-rw-r--r--  contrib/llvm-project/lldb/source/Target/Platform.cpp | 43
-rw-r--r--  contrib/llvm-project/lldb/source/Target/Target.cpp | 98
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h | 2
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h | 6
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/Transforms/IPO/DeadArgumentElimination.h | 1
-rw-r--r--  contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp | 13
-rw-r--r--  contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFUnitIndex.cpp | 8
-rw-r--r--  contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | 19
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/MC/MCObjectFileInfo.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp | 7
-rw-r--r--  contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp | 2
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Host.cpp | 5
-rw-r--r--  contrib/llvm-project/llvm/lib/Support/Windows/Path.inc | 21
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp | 15
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp | 9
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 17
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 30
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp | 134
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp | 38
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp | 1
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp | 8
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp | 24
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp | 4
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/Sparc/LeonFeatures.td | 0
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.cpp | 0
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.h | 0
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp | 2
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/Sparc/SparcSchedule.td | 0
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp | 17
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td | 4
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp | 10
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/X86/X86EvexToVex.cpp | 0
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp | 222
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h | 8
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp | 29
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td | 0
-rw-r--r-- [-rwxr-xr-x]  contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td | 0
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp | 29
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 20
-rw-r--r--  contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 20
79 files changed, 1006 insertions, 415 deletions
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
index e9c4879b41e8..e9c4879b41e8 100644..100755
--- a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attr.td b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
index dad15bc5b2d9..60eaee7839e2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
@@ -2115,7 +2115,7 @@ def InitPriority : InheritableAttr {
let Spellings = [GCC<"init_priority", /*AllowInC*/0>];
let Args = [UnsignedArgument<"Priority">];
let Subjects = SubjectList<[Var], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InitPriorityDocs];
}
def Section : InheritableAttr {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index 3cba3a3d96f9..833127ed44eb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -57,6 +57,32 @@ global variable or function should be in after translation.
let Heading = "section, __declspec(allocate)";
}
+def InitPriorityDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+In C++, the order in which global variables are initialized across translation
+units is unspecified, unlike the ordering within a single translation unit. The
+``init_priority`` attribute allows you to specify a relative ordering for the
+initialization of objects declared at namespace scope in C++. The priority is
+given as an integer constant expression between 101 and 65535 (inclusive).
+Priorities outside of that range are reserved for use by the implementation. A
+lower value indicates a higher priority of initialization. Note that only the
+relative ordering of values is important. For example:
+
+.. code-block:: c++
+
+ struct SomeType { SomeType(); };
+ __attribute__((init_priority(200))) SomeType Obj1;
+ __attribute__((init_priority(101))) SomeType Obj2;
+
+``Obj1`` will be initialized *before* ``Obj2`` despite the usual order of
+initialization being the opposite.
+
+This attribute is only supported for C++ and Objective-C++ and is ignored in
+other language modes.
+ }];
+}
+
def InitSegDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
index 01c28ebab763..5463b7dfc18c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
@@ -75,6 +75,9 @@
// U -> pure
// c -> const
// t -> signature is meaningless, use custom typechecking
+// T -> type is not important to semantic analysis and codegen; recognize as
+// builtin even if type doesn't match signature, and don't warn if we
+// can't be sure the type is right
// F -> this is a libc/libm function with a '__builtin_' prefix added.
// f -> this is a libc/libm function without the '__builtin_' prefix. It can
// be followed by ':headername:' to state which header this function
@@ -893,7 +896,7 @@ LANGBUILTIN(__va_start, "vc**.", "nt", ALL_MS_LANGUAGES)
LANGBUILTIN(__fastfail, "vUi", "nr", ALL_MS_LANGUAGES)
// Microsoft library builtins.
-LIBBUILTIN(_setjmpex, "iJ", "fj", "setjmpex.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_setjmpex, "iJ", "fjT", "setjmpex.h", ALL_MS_LANGUAGES)
// C99 library functions
// C99 stdarg.h
@@ -987,8 +990,8 @@ LIBBUILTIN(wmemmove,"w*w*wC*z", "f", "wchar.h", ALL_LANGUAGES)
// In some systems setjmp is a macro that expands to _setjmp. We undefine
// it here to avoid having two identical LIBBUILTIN entries.
#undef setjmp
-LIBBUILTIN(setjmp, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(setjmp, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(longjmp, "vJi", "frT", "setjmp.h", ALL_LANGUAGES)
// Non-C library functions, active in GNU mode only.
// Functions with (returns_twice) attribute (marked as "j") are still active in
@@ -1015,21 +1018,22 @@ LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_GNU_LANGUAGES)
LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_GNU_LANGUAGES)
// POSIX unistd.h
LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(vfork, "p", "fj", "unistd.h", ALL_LANGUAGES)
+LIBBUILTIN(vfork, "p", "fjT", "unistd.h", ALL_LANGUAGES)
// POSIX pthread.h
// FIXME: Should specify argument types.
LIBBUILTIN(pthread_create, "", "fC<2,3>", "pthread.h", ALL_GNU_LANGUAGES)
// POSIX setjmp.h
-LIBBUILTIN(_setjmp, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(__sigsetjmp, "iSJi", "fj", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(sigsetjmp, "iSJi", "fj", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(savectx, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(getcontext, "iK*", "fj", "setjmp.h", ALL_LANGUAGES)
+// FIXME: MinGW _setjmp has an additional void* parameter.
+LIBBUILTIN(_setjmp, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(__sigsetjmp, "iSJi", "fjT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(sigsetjmp, "iSJi", "fjT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(savectx, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(getcontext, "iK*", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(_longjmp, "vJi", "fr", "setjmp.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(siglongjmp, "vSJi", "fr", "setjmp.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(_longjmp, "vJi", "frT", "setjmp.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(siglongjmp, "vSJi", "frT", "setjmp.h", ALL_GNU_LANGUAGES)
// non-standard but very common
LIBBUILTIN(strlcpy, "zc*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
LIBBUILTIN(strlcat, "zc*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.h b/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
index e4ed482d9068..15bfcf797917 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
@@ -158,6 +158,13 @@ public:
return strchr(getRecord(ID).Attributes, 't') != nullptr;
}
+ /// Determines whether a declaration of this builtin should be recognized
+ /// even if the type doesn't match the specified signature.
+ bool allowTypeMismatch(unsigned ID) const {
+ return strchr(getRecord(ID).Attributes, 'T') != nullptr ||
+ hasCustomTypechecking(ID);
+ }
+
/// Determines whether this builtin has a result or any arguments which
/// are pointer types.
bool hasPtrArgsOrResult(unsigned ID) const {
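Note (editor's sketch, not part of the patch): the new 'T' attribute letter and Builtins::allowTypeMismatch() exist so that a declaration of one of these library functions whose type does not match Clang's builtin prototype is still treated as the builtin. A hypothetical translation unit illustrating the situation this tolerates (the jmp_buf layout below is invented for the example; the extra void* parameter mirrors the MinGW case mentioned in the FIXME above):

    // Hypothetical jmp_buf that does not match Clang's "iJ" builtin signature.
    struct __jmp_buf_tag { long __data[64]; };
    typedef struct __jmp_buf_tag jmp_buf[1];
    // Mismatched signature (extra void* parameter), yet with the 'T' flag the
    // declaration is still recognized as the builtin, so returns_twice
    // handling is preserved and no signature-mismatch warning is emitted.
    extern "C" int _setjmp(jmp_buf __env, void *__frame);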
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 558639ecad6a..acdad15cdf6c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -69,7 +69,7 @@ def err_drv_cuda_version_unsupported : Error<
"install, pass a different GPU arch with --cuda-gpu-arch, or pass "
"--no-cuda-version-check.">;
def warn_drv_unknown_cuda_version: Warning<
- "Unknown CUDA version %0. Assuming the latest supported version %1">,
+ "Unknown CUDA version. %0 Assuming the latest supported version %1">,
InGroup<CudaUnknownVersion>;
def err_drv_cuda_host_arch : Error<"unsupported architecture '%0' for host compilation.">;
def err_drv_mix_cuda_hip : Error<"Mixed Cuda and HIP compilation is not supported.">;
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index 3549ec9eee0e..3549ec9eee0e 100644..100755
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
index 034a0aaaf6db..034a0aaaf6db 100755..100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index 7857e75f57a1..7857e75f57a1 100644..100755
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 8994b939093e..6b93f1b60af5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -3754,11 +3754,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
case Builtin::BI_setjmpex:
- if (getTarget().getTriple().isOSMSVCRT())
+ if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
+ E->getArg(0)->getType()->isPointerType())
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
break;
case Builtin::BI_setjmp:
- if (getTarget().getTriple().isOSMSVCRT()) {
+ if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
+ E->getArg(0)->getType()->isPointerType()) {
if (getTarget().getTriple().getArch() == llvm::Triple::x86)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index bb9c494ae68e..c64faf4c0af7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -3511,19 +3511,6 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassMethods.insert(ClassMethods.begin(), OID->classmeth_begin(),
OID->classmeth_end());
- // Collect the same information about synthesized properties, which don't
- // show up in the instance method lists.
- for (auto *propertyImpl : OID->property_impls())
- if (propertyImpl->getPropertyImplementation() ==
- ObjCPropertyImplDecl::Synthesize) {
- auto addPropertyMethod = [&](const ObjCMethodDecl *accessor) {
- if (accessor)
- InstanceMethods.push_back(accessor);
- };
- addPropertyMethod(propertyImpl->getGetterMethodDecl());
- addPropertyMethod(propertyImpl->getSetterMethodDecl());
- }
-
llvm::Constant *Properties = GeneratePropertyList(OID, ClassDecl);
// Collect the names of referenced protocols
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 9cd63ebe29ee..a061651d8b21 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -5471,6 +5471,11 @@ private:
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override {
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (isa<llvm::ScalableVectorType>(BaseTy))
+ llvm::report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+
return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
: isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
@@ -10390,7 +10395,6 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
return false;
Field1Ty = CGT.ConvertType(EltTy);
Field1Off = CurOff;
- assert(CurOff.isZero() && "Unexpected offset for first field");
Field2Ty = Field1Ty;
Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
return true;
@@ -10485,7 +10489,7 @@ bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
NeededArgFPRs++;
else if (Field2Ty)
NeededArgGPRs++;
- return IsCandidate;
+ return true;
}
// Call getCoerceAndExpand for the two-element flattened struct described by
@@ -10511,15 +10515,15 @@ ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
CharUnits Field2Align =
CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
- CharUnits Field1Size =
+ CharUnits Field1End = Field1Off +
CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
- CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);
+ CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
CharUnits Padding = CharUnits::Zero();
if (Field2Off > Field2OffNoPadNoPack)
Padding = Field2Off - Field2OffNoPadNoPack;
- else if (Field2Off != Field2Align && Field2Off > Field1Size)
- Padding = Field2Off - Field1Size;
+ else if (Field2Off != Field2Align && Field2Off > Field1End)
+ Padding = Field2Off - Field1End;
bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
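Editor's sketch (hypothetical, not from the patch) of the kind of code the new AArch64 guard earlier in this file rejects: reading a scalable SVE value through va_arg now produces the "Passing SVE types to variadic functions is currently not supported" fatal error instead of reaching the generic lowering paths.

    #include <arm_sve.h>
    #include <stdarg.h>
    svint32_t first_sve_arg(int n, ...) {
      va_list ap;
      va_start(ap, n);
      svint32_t v = va_arg(ap, svint32_t); // EmitVAArg on a scalable vector type
      va_end(ap);
      return v;
    }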
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 110a0bca9bc1..ffc606dd554b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -16,6 +16,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
@@ -32,30 +33,81 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+namespace {
+struct CudaVersionInfo {
+ std::string DetectedVersion;
+ CudaVersion Version;
+};
// Parses the contents of version.txt in a CUDA installation. It should
// contain one line of the form e.g. "CUDA Version 7.5.2".
-void CudaInstallationDetector::ParseCudaVersionFile(llvm::StringRef V) {
- Version = CudaVersion::UNKNOWN;
+CudaVersionInfo parseCudaVersionFile(llvm::StringRef V) {
+ V = V.trim();
if (!V.startswith("CUDA Version "))
- return;
+ return {V.str(), CudaVersion::UNKNOWN};
V = V.substr(strlen("CUDA Version "));
SmallVector<StringRef,4> VersionParts;
V.split(VersionParts, '.');
- if (VersionParts.size() < 2)
- return;
- DetectedVersion = join_items(".", VersionParts[0], VersionParts[1]);
- Version = CudaStringToVersion(DetectedVersion);
- if (Version != CudaVersion::UNKNOWN) {
- // TODO(tra): remove the warning once we have all features of 10.2 and 11.0
- // implemented.
- DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
- return;
- }
+ return {"version.txt: " + V.str() + ".",
+ VersionParts.size() < 2
+ ? CudaVersion::UNKNOWN
+ : CudaStringToVersion(
+ join_items(".", VersionParts[0], VersionParts[1]))};
+}
- Version = CudaVersion::LATEST_SUPPORTED;
- DetectedVersionIsNotSupported = true;
+CudaVersion getCudaVersion(uint32_t raw_version) {
+ if (raw_version < 7050)
+ return CudaVersion::CUDA_70;
+ if (raw_version < 8000)
+ return CudaVersion::CUDA_75;
+ if (raw_version < 9000)
+ return CudaVersion::CUDA_80;
+ if (raw_version < 9010)
+ return CudaVersion::CUDA_90;
+ if (raw_version < 9020)
+ return CudaVersion::CUDA_91;
+ if (raw_version < 10000)
+ return CudaVersion::CUDA_92;
+ if (raw_version < 10010)
+ return CudaVersion::CUDA_100;
+ if (raw_version < 10020)
+ return CudaVersion::CUDA_101;
+ if (raw_version < 11000)
+ return CudaVersion::CUDA_102;
+ if (raw_version < 11010)
+ return CudaVersion::CUDA_110;
+ return CudaVersion::LATEST;
}
+CudaVersionInfo parseCudaHFile(llvm::StringRef Input) {
+ // Helper lambda which skips the words if the line starts with them or returns
+ // None otherwise.
+ auto StartsWithWords =
+ [](llvm::StringRef Line,
+ const SmallVector<StringRef, 3> words) -> llvm::Optional<StringRef> {
+ for (StringRef word : words) {
+ if (!Line.consume_front(word))
+ return {};
+ Line = Line.ltrim();
+ }
+ return Line;
+ };
+
+ Input = Input.ltrim();
+ while (!Input.empty()) {
+ if (auto Line =
+ StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
+ uint32_t RawVersion;
+ Line->consumeInteger(10, RawVersion);
+ return {"cuda.h: CUDA_VERSION=" + Twine(RawVersion).str() + ".",
+ getCudaVersion(RawVersion)};
+ }
+ // Find next non-empty line.
+ Input = Input.drop_front(Input.find_first_of("\n\r")).ltrim();
+ }
+ return {"cuda.h: CUDA_VERSION not found.", CudaVersion::UNKNOWN};
+}
+} // namespace
+
void CudaInstallationDetector::WarnIfUnsupportedVersion() {
if (DetectedVersionIsNotSupported)
D.Diag(diag::warn_drv_unknown_cuda_version)
@@ -152,16 +204,31 @@ CudaInstallationDetector::CudaInstallationDetector(
else
continue;
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
- FS.getBufferForFile(InstallPath + "/version.txt");
- if (!VersionFile) {
- // CUDA 7.0 doesn't have a version.txt, so guess that's our version if
- // version.txt isn't present.
- Version = CudaVersion::CUDA_70;
- } else {
- ParseCudaVersionFile((*VersionFile)->getBuffer());
+ CudaVersionInfo VersionInfo = {"", CudaVersion::UNKNOWN};
+ if (auto VersionFile = FS.getBufferForFile(InstallPath + "/version.txt"))
+ VersionInfo = parseCudaVersionFile((*VersionFile)->getBuffer());
+ // If version file didn't give us the version, try to find it in cuda.h
+ if (VersionInfo.Version == CudaVersion::UNKNOWN)
+ if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
+ VersionInfo = parseCudaHFile((*CudaHFile)->getBuffer());
+ // As the last resort, make an educated guess between CUDA-7.0, (which had
+ // no version.txt file and had old-style libdevice bitcode ) and an unknown
+ // recent CUDA version (no version.txt, new style bitcode).
+ if (VersionInfo.Version == CudaVersion::UNKNOWN) {
+ VersionInfo.Version = (FS.exists(LibDevicePath + "/libdevice.10.bc"))
+ ? Version = CudaVersion::LATEST
+ : Version = CudaVersion::CUDA_70;
+ VersionInfo.DetectedVersion =
+ "No version found in version.txt or cuda.h.";
}
+ Version = VersionInfo.Version;
+ DetectedVersion = VersionInfo.DetectedVersion;
+
+ // TODO(tra): remove the warning once we have all features of 10.2
+ // and 11.0 implemented.
+ DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
+
if (Version >= CudaVersion::CUDA_90) {
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
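For reference, a short illustration of the inputs the new detection logic consumes, derived from the code above (the concrete values are examples, not from the patch):

    // version.txt: "CUDA Version 10.1.105"    -> parseCudaVersionFile -> CUDA_101
    // cuda.h:      #define CUDA_VERSION 10010 -> getCudaVersion(10010) -> CUDA_101
    // cuda.h:      #define CUDA_VERSION 11010 -> getCudaVersion(11010) -> LATEST
    // If neither file yields a version, the presence of libdevice.10.bc selects
    // LATEST, otherwise CUDA_70.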
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index 873eb7338a30..bbf272c468a5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -78,9 +78,6 @@ public:
return LibDeviceMap.lookup(Gpu);
}
void WarnIfUnsupportedVersion();
-
-private:
- void ParseCudaVersionFile(llvm::StringRef V);
};
namespace tools {
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index d4287f53fde3..b132a3e84da5 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -183,12 +183,6 @@ struct FormatToken {
/// before the token.
bool MustBreakBefore = false;
- /// Whether to not align across this token
- ///
- /// This happens for example when a preprocessor directive ended directly
- /// before the token, but very rarely otherwise.
- bool MustBreakAlignBefore = false;
-
/// The raw text of the token.
///
/// Contains the raw token text without leading whitespace and without leading
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index a37386425aae..ea8a41cfba82 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -3037,7 +3037,6 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
- FormatTok->MustBreakAlignBefore = true;
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -3062,7 +3061,6 @@ void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
- Line->Tokens.back().Tok->MustBreakAlignBefore = true;
MustBreakBeforeNextToken = false;
}
}
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index 32e0b685ea0f..3a265bd09168 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -411,11 +411,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Changes[i].NewlinesBefore != 0) {
CommasBeforeMatch = 0;
EndOfSequence = i;
- // If there is a blank line, there is a forced-align-break (eg,
- // preprocessor), or if the last line didn't contain any matching token,
- // the sequence ends here.
- if (Changes[i].NewlinesBefore > 1 ||
- Changes[i].Tok->MustBreakAlignBefore || !FoundMatchOnLine)
+ // If there is a blank line, or if the last line didn't contain any
+ // matching token, the sequence ends here.
+ if (Changes[i].NewlinesBefore > 1 || !FoundMatchOnLine)
AlignCurrentSequence();
FoundMatchOnLine = false;
@@ -726,8 +724,6 @@ void WhitespaceManager::alignTrailingComments() {
if (Changes[i].StartOfBlockComment)
continue;
Newlines += Changes[i].NewlinesBefore;
- if (Changes[i].Tok->MustBreakAlignBefore)
- BreakBeforeNext = true;
if (!Changes[i].IsTrailingComment)
continue;
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
index 8c10ff6b461f..d924487ab285 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
@@ -16,7 +16,7 @@
// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
#pragma push_macro("__DEVICE__")
-#ifdef _OPENMP
+#ifdef __OPENMP_NVPTX__
#pragma omp declare target
#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
#else
@@ -26,7 +26,7 @@
// To make the algorithms available for C and C++ in CUDA and OpenMP we select
// different but equivalent function versions. TODO: For OpenMP we currently
// select the native builtins as the overload support for templates is lacking.
-#if !defined(_OPENMP)
+#if !defined(__OPENMP_NVPTX__)
#define _ISNANd std::isnan
#define _ISNANf std::isnan
#define _ISINFd std::isinf
@@ -250,7 +250,7 @@ __DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
#undef _LOGBd
#undef _LOGBf
-#ifdef _OPENMP
+#ifdef __OPENMP_NVPTX__
#pragma omp end declare target
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/new b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/new
index f49811c5a57c..7f255314056a 100644
--- a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/new
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/new
@@ -26,6 +26,13 @@
#include_next <new>
+#if !defined(__device__)
+// The header has been included too early from the standard C++ library
+// and CUDA-specific macros are not available yet.
+// Undo the include guard and try again later.
+#undef __CLANG_CUDA_WRAPPERS_NEW
+#else
+
#pragma push_macro("CUDA_NOEXCEPT")
#if __cplusplus >= 201103L
#define CUDA_NOEXCEPT noexcept
@@ -95,4 +102,5 @@ __device__ inline void operator delete[](void *, void *) CUDA_NOEXCEPT {}
#pragma pop_macro("CUDA_NOEXCEPT")
+#endif // __device__
#endif // include guard
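The #undef above relies on a small header trick: when the wrapper is included before __device__ exists, it cancels its own include guard so that a later include (once the CUDA macros are set up) gets another chance. A generic sketch of the pattern with hypothetical macro names:

    #ifndef MY_WRAPPER_GUARD
    #define MY_WRAPPER_GUARD
    #if !defined(PREREQUISITE_MACRO)
    // Included too early: undo the guard so a later #include retries.
    #undef MY_WRAPPER_GUARD
    #else
    // ... real wrapper contents, usable only once PREREQUISITE_MACRO exists ...
    #endif
    #endif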
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
index 1ed0b14879ef..d8dcd41670ee 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
@@ -18,7 +18,9 @@
#include <cmath>
#define __CUDA__
+#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
#endif
// Grab the host header too.
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
index 829c7a785725..00d278548f82 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
@@ -18,7 +18,9 @@
#include <math.h>
#define __CUDA__
+#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_NVPTX__
#endif
// Grab the host header too.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index b00d2ff5f1d5..1bf04d9cb4f2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -1573,11 +1573,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinSetjmp(TheCall))
return ExprError();
break;
- case Builtin::BI_setjmp:
- case Builtin::BI_setjmpex:
- if (checkArgCount(*this, TheCall, 1))
- return true;
- break;
case Builtin::BI__builtin_classify_type:
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index ddd95faebe99..ddd95faebe99 100644..100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 2703d9876f85..5b0417fa8859 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -2107,7 +2107,8 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
// If we have a builtin without an associated type we should not emit a
// warning when we were not able to find a type for it.
- if (Error == ASTContext::GE_Missing_type)
+ if (Error == ASTContext::GE_Missing_type ||
+ Context.BuiltinInfo.allowTypeMismatch(ID))
return nullptr;
// If we could not find a type for setjmp it is because the jmp_buf type was
@@ -2131,11 +2132,9 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
- if (Context.BuiltinInfo.getHeaderName(ID) &&
- !Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
+ if (const char *Header = Context.BuiltinInfo.getHeaderName(ID))
Diag(Loc, diag::note_include_header_or_declare)
- << Context.BuiltinInfo.getHeaderName(ID)
- << Context.BuiltinInfo.getName(ID);
+ << Header << Context.BuiltinInfo.getName(ID);
}
if (R.isNull())
@@ -9630,19 +9629,20 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
- // In C builtins get merged with implicitly lazily created declarations.
- // In C++ we need to check if it's a builtin and add the BuiltinAttr here.
- if (getLangOpts().CPlusPlus) {
+ // If this is the first declaration of a library builtin function, add
+ // attributes as appropriate.
+ if (!D.isRedeclaration() &&
+ NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
if (IdentifierInfo *II = Previous.getLookupName().getAsIdentifierInfo()) {
if (unsigned BuiltinID = II->getBuiltinID()) {
if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
- // Declarations for builtins with custom typechecking by definition
- // don't make sense. Don't attempt typechecking and simply add the
- // attribute.
- if (Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
+ // Validate the type matches unless this builtin is specified as
+ // matching regardless of its declared type.
+ if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
} else {
ASTContext::GetBuiltinTypeError Error;
+ LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
if (!Error && !BuiltinType.isNull() &&
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index 1a0594512a60..a9a2a19b4797 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -3301,7 +3301,11 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (prioritynum < 101 || prioritynum > 65535) {
+ // Only perform the priority check if the attribute is outside of a system
+ // header. Values <= 100 are reserved for the implementation, and libc++
+ // benefits from being able to specify values in that range.
+ if ((prioritynum < 101 || prioritynum > 65535) &&
+ !S.getSourceManager().isInSystemHeader(AL.getLoc())) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
<< E->getSourceRange() << AL << 101 << 65535;
AL.setInvalid();
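A brief illustration of what the relaxed check accepts (the declaration is hypothetical): a priority below 101 is now allowed when the attribute appears in a system header, which lets libc++ use the reserved range for its own initialization; the same declaration in user code still triggers err_attribute_argument_out_of_range.

    // Hypothetical system-header declaration: 100 is in the range reserved for
    // the implementation, but is no longer rejected inside a system header.
    struct __ios_init { __ios_init(); };
    __attribute__((init_priority(100))) static __ios_init __early_init;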
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index c05ed0b14e3e..f788cf103503 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -1963,8 +1963,7 @@ public:
TypeLocBuilder InnerTLB;
QualType Transformed =
TransformType(InnerTLB, OrigDecl->getTypeSourceInfo()->getTypeLoc());
- TypeSourceInfo *TSI =
- TransformType(InnerTLB.getTypeSourceInfo(Context, Transformed));
+ TypeSourceInfo *TSI = InnerTLB.getTypeSourceInfo(Context, Transformed);
TypedefNameDecl *Decl = nullptr;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index baec13ba627c..7e6efe6105bf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -3629,8 +3629,11 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
VarTemplate, DI->getType(), DI, D->getStorageClass(), Converted);
Var->setTemplateArgsInfo(TemplateArgsInfo);
- if (InsertPos)
+ if (!PrevDecl) {
+ void *InsertPos = nullptr;
+ VarTemplate->findSpecialization(Converted, InsertPos);
VarTemplate->AddSpecialization(Var, InsertPos);
+ }
if (SemaRef.getLangOpts().OpenCL)
SemaRef.deduceOpenCLAddressSpace(Var);
@@ -5311,7 +5314,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
TemplateArgs);
Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl(
VarSpec->getSpecializedTemplate(), Def, nullptr,
- VarSpec->getTemplateArgsInfo(), VarSpec->getTemplateArgs().asArray()));
+ VarSpec->getTemplateArgsInfo(), VarSpec->getTemplateArgs().asArray(), VarSpec));
if (Var) {
llvm::PointerUnion<VarTemplateDecl *,
VarTemplatePartialSpecializationDecl *> PatternPtr =
@@ -5321,12 +5324,6 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
cast<VarTemplateSpecializationDecl>(Var)->setInstantiationOf(
Partial, &VarSpec->getTemplateInstantiationArgs());
- // Merge the definition with the declaration.
- LookupResult R(*this, Var->getDeclName(), Var->getLocation(),
- LookupOrdinaryName, forRedeclarationInCurContext());
- R.addDecl(OldVar);
- MergeVarDecl(Var, R);
-
// Attach the initializer.
InstantiateVariableInitializer(Var, Def, TemplateArgs);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
index d207e07f451a..c33d30478e2a 100644
--- a/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
+++ b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
@@ -67,10 +67,13 @@ public:
void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (E->getOperatorDelete())
asImpl().visitUsedDecl(E->getBeginLoc(), E->getOperatorDelete());
- QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
- if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ QualType DestroyedOrNull = E->getDestroyedType();
+ if (!DestroyedOrNull.isNull()) {
+ QualType Destroyed = S.Context.getBaseElementType(DestroyedOrNull);
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ }
}
Inherited::VisitCXXDeleteExpr(E);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index ec612dde3b8b..ec612dde3b8b 100755..100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 490a04b2181b..490a04b2181b 100755..100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
diff --git a/contrib/llvm-project/lld/COFF/DebugTypes.cpp b/contrib/llvm-project/lld/COFF/DebugTypes.cpp
index 4790b0166799..abe3bb9eef5b 100644
--- a/contrib/llvm-project/lld/COFF/DebugTypes.cpp
+++ b/contrib/llvm-project/lld/COFF/DebugTypes.cpp
@@ -202,6 +202,9 @@ Expected<const CVIndexMap *> TpiSource::mergeDebugT(TypeMerger *m,
BinaryStreamReader reader(file->debugTypes, support::little);
cantFail(reader.readArray(types, reader.getLength()));
+ // When dealing with PCH.OBJ, some indices were already merged.
+ unsigned nbHeadIndices = indexMap->tpiMap.size();
+
if (config->debugGHashes) {
ArrayRef<GloballyHashedType> hashes;
std::vector<GloballyHashedType> ownedHashes;
@@ -232,7 +235,7 @@ Expected<const CVIndexMap *> TpiSource::mergeDebugT(TypeMerger *m,
// collecting statistics.
m->tpiCounts.resize(m->getTypeTable().size());
m->ipiCounts.resize(m->getIDTable().size());
- uint32_t srcIdx = 0;
+ uint32_t srcIdx = nbHeadIndices;
for (CVType &ty : types) {
TypeIndex dstIdx = indexMap->tpiMap[srcIdx++];
// Type merging may fail, so a complex source type may become the simple
diff --git a/contrib/llvm-project/lld/ELF/LinkerScript.cpp b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
index 6de2cd65b973..7314b27659bb 100644
--- a/contrib/llvm-project/lld/ELF/LinkerScript.cpp
+++ b/contrib/llvm-project/lld/ELF/LinkerScript.cpp
@@ -679,8 +679,11 @@ addInputSec(StringMap<TinyPtrVector<OutputSection *>> &map,
auto *firstIsec = cast<InputSectionBase>(
cast<InputSectionDescription>(sec->sectionCommands[0])
->sectionBases[0]);
- if (firstIsec->getLinkOrderDep()->getOutputSection() !=
- isec->getLinkOrderDep()->getOutputSection())
+ OutputSection *firstIsecOut =
+ firstIsec->flags & SHF_LINK_ORDER
+ ? firstIsec->getLinkOrderDep()->getOutputSection()
+ : nullptr;
+ if (firstIsecOut != isec->getLinkOrderDep()->getOutputSection())
continue;
}
diff --git a/contrib/llvm-project/lldb/include/lldb/Core/ModuleList.h b/contrib/llvm-project/lldb/include/lldb/Core/ModuleList.h
index ae1f6fdb20a2..d90b27e474ac 100644
--- a/contrib/llvm-project/lldb/include/lldb/Core/ModuleList.h
+++ b/contrib/llvm-project/lldb/include/lldb/Core/ModuleList.h
@@ -139,7 +139,13 @@ public:
///
/// \param[in] module_sp
/// A shared pointer to a module to replace in this collection.
- void ReplaceEquivalent(const lldb::ModuleSP &module_sp);
+ ///
+ /// \param[in] old_modules
+ /// Optional pointer to a vector which, if provided, will have shared
+ /// pointers to the replaced module(s) appended to it.
+ void ReplaceEquivalent(
+ const lldb::ModuleSP &module_sp,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules = nullptr);
/// Append a module to the module list, if it is not already there.
///
@@ -443,12 +449,11 @@ public:
static bool ModuleIsInCache(const Module *module_ptr);
- static Status GetSharedModule(const ModuleSpec &module_spec,
- lldb::ModuleSP &module_sp,
- const FileSpecList *module_search_paths_ptr,
- lldb::ModuleSP *old_module_sp_ptr,
- bool *did_create_ptr,
- bool always_create = false);
+ static Status
+ GetSharedModule(const ModuleSpec &module_spec, lldb::ModuleSP &module_sp,
+ const FileSpecList *module_search_paths_ptr,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules,
+ bool *did_create_ptr, bool always_create = false);
static bool RemoveSharedModule(lldb::ModuleSP &module_sp);
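With the new signature, callers pass a vector that collects every module replaced along the way instead of a single out-parameter. A hedged sketch of an updated call site (module_spec, module_sp, and the handling loop are assumptions for illustration, not code from the patch):

    llvm::SmallVector<lldb::ModuleSP, 1> old_modules; // replaced modules end up here
    bool did_create = false;
    lldb_private::Status error = lldb_private::ModuleList::GetSharedModule(
        module_spec, module_sp, /*module_search_paths_ptr=*/nullptr, &old_modules,
        &did_create);
    for (const lldb::ModuleSP &old_sp : old_modules) {
      // Each replaced module can now be handled individually, e.g. dropped from
      // a target's image list before the new module_sp is added.
      (void)old_sp;
    }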
diff --git a/contrib/llvm-project/lldb/include/lldb/Target/Platform.h b/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
index 6234b8244b3f..277fcf68cb0c 100644
--- a/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
+++ b/contrib/llvm-project/lldb/include/lldb/Target/Platform.h
@@ -301,11 +301,10 @@ public:
LocateExecutableScriptingResources(Target *target, Module &module,
Stream *feedback_stream);
- virtual Status GetSharedModule(const ModuleSpec &module_spec,
- Process *process, lldb::ModuleSP &module_sp,
- const FileSpecList *module_search_paths_ptr,
- lldb::ModuleSP *old_module_sp_ptr,
- bool *did_create_ptr);
+ virtual Status GetSharedModule(
+ const ModuleSpec &module_spec, Process *process,
+ lldb::ModuleSP &module_sp, const FileSpecList *module_search_paths_ptr,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules, bool *did_create_ptr);
virtual bool GetModuleSpec(const FileSpec &module_file_spec,
const ArchSpec &arch, ModuleSpec &module_spec);
diff --git a/contrib/llvm-project/lldb/source/Core/ModuleList.cpp b/contrib/llvm-project/lldb/source/Core/ModuleList.cpp
index 0345678ddaff..1701cb56338e 100644
--- a/contrib/llvm-project/lldb/source/Core/ModuleList.cpp
+++ b/contrib/llvm-project/lldb/source/Core/ModuleList.cpp
@@ -171,7 +171,9 @@ void ModuleList::Append(const ModuleSP &module_sp, bool notify) {
AppendImpl(module_sp, notify);
}
-void ModuleList::ReplaceEquivalent(const ModuleSP &module_sp) {
+void ModuleList::ReplaceEquivalent(
+ const ModuleSP &module_sp,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules) {
if (module_sp) {
std::lock_guard<std::recursive_mutex> guard(m_modules_mutex);
@@ -184,11 +186,14 @@ void ModuleList::ReplaceEquivalent(const ModuleSP &module_sp) {
size_t idx = 0;
while (idx < m_modules.size()) {
- ModuleSP module_sp(m_modules[idx]);
- if (module_sp->MatchesModuleSpec(equivalent_module_spec))
+ ModuleSP test_module_sp(m_modules[idx]);
+ if (test_module_sp->MatchesModuleSpec(equivalent_module_spec)) {
+ if (old_modules)
+ old_modules->push_back(test_module_sp);
RemoveImpl(m_modules.begin() + idx);
- else
+ } else {
++idx;
+ }
}
// Now add the new module to the list
Append(module_sp);
@@ -731,11 +736,11 @@ size_t ModuleList::RemoveOrphanSharedModules(bool mandatory) {
return GetSharedModuleList().RemoveOrphans(mandatory);
}
-Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
- ModuleSP &module_sp,
- const FileSpecList *module_search_paths_ptr,
- ModuleSP *old_module_sp_ptr,
- bool *did_create_ptr, bool always_create) {
+Status
+ModuleList::GetSharedModule(const ModuleSpec &module_spec, ModuleSP &module_sp,
+ const FileSpecList *module_search_paths_ptr,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules,
+ bool *did_create_ptr, bool always_create) {
ModuleList &shared_module_list = GetSharedModuleList();
std::lock_guard<std::recursive_mutex> guard(
shared_module_list.m_modules_mutex);
@@ -747,8 +752,6 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
if (did_create_ptr)
*did_create_ptr = false;
- if (old_module_sp_ptr)
- old_module_sp_ptr->reset();
const UUID *uuid_ptr = module_spec.GetUUIDPtr();
const FileSpec &module_file_spec = module_spec.GetFileSpec();
@@ -769,8 +772,8 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
// Make sure the file for the module hasn't been modified
if (module_sp->FileHasChanged()) {
- if (old_module_sp_ptr && !*old_module_sp_ptr)
- *old_module_sp_ptr = module_sp;
+ if (old_modules)
+ old_modules->push_back(module_sp);
Log *log(lldb_private::GetLogIfAnyCategoriesSet(LIBLLDB_LOG_MODULES));
if (log != nullptr)
@@ -812,7 +815,7 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
*did_create_ptr = true;
}
- shared_module_list.ReplaceEquivalent(module_sp);
+ shared_module_list.ReplaceEquivalent(module_sp, old_modules);
return error;
}
}
@@ -849,7 +852,7 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
if (did_create_ptr)
*did_create_ptr = true;
- shared_module_list.ReplaceEquivalent(module_sp);
+ shared_module_list.ReplaceEquivalent(module_sp, old_modules);
return Status();
}
}
@@ -924,8 +927,8 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
located_binary_modulespec.GetFileSpec());
if (file_spec_mod_time != llvm::sys::TimePoint<>()) {
if (file_spec_mod_time != module_sp->GetModificationTime()) {
- if (old_module_sp_ptr)
- *old_module_sp_ptr = module_sp;
+ if (old_modules)
+ old_modules->push_back(module_sp);
shared_module_list.Remove(module_sp);
module_sp.reset();
}
@@ -947,7 +950,7 @@ Status ModuleList::GetSharedModule(const ModuleSpec &module_spec,
if (did_create_ptr)
*did_create_ptr = true;
- shared_module_list.ReplaceEquivalent(module_sp);
+ shared_module_list.ReplaceEquivalent(module_sp, old_modules);
}
} else {
located_binary_modulespec.GetFileSpec().GetPath(path, sizeof(path));
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
index fc8ee346f449..1041f63aa2e2 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.cpp
@@ -121,6 +121,72 @@ private:
lldb::addr_t m_base;
lldb::addr_t m_size;
};
+
+/// Duplicate the HashElfTextSection() from the breakpad sources.
+///
+/// Breakpad, a Google crash log reporting tool suite, creates minidump files
+/// for many different architectures. When using Breakpad to create ELF
+/// minidumps, it will check for a GNU build ID when creating a minidump file
+/// and if one doesn't exist in the file, it will say the UUID of the file is a
+/// checksum of up to the first 4096 bytes of the .text section. Facebook also
+/// uses breakpad and modified this hash to avoid collisions so we can
+/// calculate and check for this as well.
+///
+/// The breakpad code might end up hashing up to 15 bytes that immediately
+/// follow the .text section in the file, so this code must do exactly what the
+/// breakpad code does so we can get an exact match for the UUID.
+///
+/// \param[in] module_sp The module to grab the .text section from.
+///
+/// \param[in/out] breakpad_uuid A vector that will receive the calculated
+/// breakpad .text hash.
+///
+/// \param[in/out] facebook_uuid A vector that will receive the calculated
+/// facebook .text hash.
+///
+void HashElfTextSection(ModuleSP module_sp, std::vector<uint8_t> &breakpad_uuid,
+ std::vector<uint8_t> &facebook_uuid) {
+ SectionList *sect_list = module_sp->GetSectionList();
+ if (sect_list == nullptr)
+ return;
+ SectionSP sect_sp = sect_list->FindSectionByName(ConstString(".text"));
+ if (!sect_sp)
+ return;
+ constexpr size_t kMDGUIDSize = 16;
+ constexpr size_t kBreakpadPageSize = 4096;
+ // The breakpad code has a bug where it might access beyond the end of a
+ // .text section by up to 15 bytes, so we must ensure we round up to the
+ // next kMDGUIDSize byte boundary.
+ DataExtractor data;
+ const size_t text_size = sect_sp->GetFileSize();
+ const size_t read_size = std::min<size_t>(
+ llvm::alignTo(text_size, kMDGUIDSize), kBreakpadPageSize);
+ sect_sp->GetObjectFile()->GetData(sect_sp->GetFileOffset(), read_size, data);
+
+ breakpad_uuid.assign(kMDGUIDSize, 0);
+ facebook_uuid.assign(kMDGUIDSize, 0);
+
+ // The only difference between the breakpad hash and the facebook hash is the
+ // hashing of the text section size into the hash prior to hashing the .text
+ // contents.
+ for (size_t i = 0; i < kMDGUIDSize; i++)
+ facebook_uuid[i] ^= text_size % 255;
+
+ // This code carefully duplicates how the hash was created in Breakpad
+ // sources, including the error where it might hash an extra 15 bytes past the
+ // end of the .text section if the .text section is less than a page size in
+ // length.
+ const uint8_t *ptr = data.GetDataStart();
+ const uint8_t *ptr_end = data.GetDataEnd();
+ while (ptr < ptr_end) {
+ for (unsigned i = 0; i < kMDGUIDSize; i++) {
+ breakpad_uuid[i] ^= ptr[i];
+ facebook_uuid[i] ^= ptr[i];
+ }
+ ptr += kMDGUIDSize;
+ }
+}
+
} // namespace
ConstString ProcessMinidump::GetPluginNameStatic() {
@@ -444,6 +510,53 @@ bool ProcessMinidump::UpdateThreadList(ThreadList &old_thread_list,
return new_thread_list.GetSize(false) > 0;
}
+ModuleSP ProcessMinidump::GetOrCreateModule(UUID minidump_uuid,
+ llvm::StringRef name,
+ ModuleSpec module_spec) {
+ Log *log(GetLogIfAllCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
+ Status error;
+
+ ModuleSP module_sp =
+ GetTarget().GetOrCreateModule(module_spec, true /* notify */, &error);
+ if (!module_sp)
+ return module_sp;
+ // We consider the module to be a match if the minidump UUID is a
+ // prefix of the actual UUID, or if either of the UUIDs are empty.
+ const auto dmp_bytes = minidump_uuid.GetBytes();
+ const auto mod_bytes = module_sp->GetUUID().GetBytes();
+ const bool match = dmp_bytes.empty() || mod_bytes.empty() ||
+ mod_bytes.take_front(dmp_bytes.size()) == dmp_bytes;
+ if (match) {
+ LLDB_LOG(log, "Partial uuid match for {0}.", name);
+ return module_sp;
+ }
+
+ // Breakpad generates minidump files, and if there is no GNU build
+ // ID in the binary, it will calculate a UUID by hashing the first 4096
+ // bytes of the .text section and using that as the UUID for a module
+ // in the minidump. Facebook uses a modified breakpad client that
+ // uses a slightly modified version of this hash to avoid collisions. Check
+ // for UUIDs from the minidump that match these cases and accept the
+ // module we find if they do match.
+ std::vector<uint8_t> breakpad_uuid;
+ std::vector<uint8_t> facebook_uuid;
+ HashElfTextSection(module_sp, breakpad_uuid, facebook_uuid);
+ if (dmp_bytes == llvm::ArrayRef<uint8_t>(breakpad_uuid)) {
+ LLDB_LOG(log, "Breakpad .text hash match for {0}.", name);
+ return module_sp;
+ }
+ if (dmp_bytes == llvm::ArrayRef<uint8_t>(facebook_uuid)) {
+ LLDB_LOG(log, "Facebook .text hash match for {0}.", name);
+ return module_sp;
+ }
+ // The UUID wasn't a partial match and didn't match the .text hash
+ // so remove the module from the target, we will need to create a
+ // placeholder object file.
+ GetTarget().GetImages().Remove(module_sp);
+ module_sp.reset();
+ return module_sp;
+}
+
void ProcessMinidump::ReadModuleList() {
std::vector<const minidump::Module *> filtered_modules =
m_minidump_parser->GetFilteredModuleList();
@@ -473,30 +586,21 @@ void ProcessMinidump::ReadModuleList() {
// add the module to the target if it finds one.
lldb::ModuleSP module_sp = GetTarget().GetOrCreateModule(module_spec,
true /* notify */, &error);
- if (!module_sp) {
- // Try and find a module without specifying the UUID and only looking for
- // the file given a basename. We then will look for a partial UUID match
- // if we find any matches. This function will add the module to the
- // target if it finds one, so we need to remove the module from the target
- // if the UUID doesn't match during our manual UUID verification. This
- // allows the "target.exec-search-paths" setting to specify one or more
- // directories that contain executables that can be searched for matches.
- ModuleSpec basename_module_spec(module_spec);
- basename_module_spec.GetUUID().Clear();
- basename_module_spec.GetFileSpec().GetDirectory().Clear();
- module_sp = GetTarget().GetOrCreateModule(basename_module_spec,
- true /* notify */, &error);
- if (module_sp) {
- // We consider the module to be a match if the minidump UUID is a
- // prefix of the actual UUID, or if either of the UUIDs are empty.
- const auto dmp_bytes = uuid.GetBytes();
- const auto mod_bytes = module_sp->GetUUID().GetBytes();
- const bool match = dmp_bytes.empty() || mod_bytes.empty() ||
- mod_bytes.take_front(dmp_bytes.size()) == dmp_bytes;
- if (!match) {
- GetTarget().GetImages().Remove(module_sp);
- module_sp.reset();
- }
+ if (module_sp) {
+ LLDB_LOG(log, "Full uuid match for {0}.", name);
+ } else {
+ // We couldn't find a module with an exactly-matching UUID. Sometimes
+ // a minidump UUID is only a partial match or is a hash. So try again
+ // without specifying the UUID, then again without specifying the
+ // directory if that fails. This will allow us to find modules with
+ // partial matches or hash UUIDs in user-provided sysroots or search
+ // directories (target.exec-search-paths).
+ ModuleSpec partial_module_spec = module_spec;
+ partial_module_spec.GetUUID().Clear();
+ module_sp = GetOrCreateModule(uuid, name, partial_module_spec);
+ if (!module_sp) {
+ partial_module_spec.GetFileSpec().GetDirectory().Clear();
+ module_sp = GetOrCreateModule(uuid, name, partial_module_spec);
}
}
if (module_sp) {
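A small reading aid for the two hashes computed by HashElfTextSection() above (derived from the code shown, not stated in the patch): both fold the same 16-byte blocks of .text, so the results differ only by the size byte mixed into the Facebook variant up front.

    // facebook_uuid[i] == breakpad_uuid[i] ^ (text_size % 255)   for every i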
diff --git a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.h b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.h
index 839b0e7563f7..bfdace7ea33e 100644
--- a/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.h
+++ b/contrib/llvm-project/lldb/source/Plugins/Process/minidump/ProcessMinidump.h
@@ -102,6 +102,10 @@ protected:
void ReadModuleList();
+ lldb::ModuleSP GetOrCreateModule(lldb_private::UUID minidump_uuid,
+ llvm::StringRef name,
+ lldb_private::ModuleSpec module_spec);
+
JITLoaderList &GetJITLoaders() override;
private:
diff --git a/contrib/llvm-project/lldb/source/Target/Platform.cpp b/contrib/llvm-project/lldb/source/Target/Platform.cpp
index 16787141bee0..e5afb4c7b8d7 100644
--- a/contrib/llvm-project/lldb/source/Target/Platform.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Platform.cpp
@@ -218,15 +218,14 @@ Platform::LocateExecutableScriptingResources(Target *target, Module &module,
// return PlatformSP();
//}
-Status Platform::GetSharedModule(const ModuleSpec &module_spec,
- Process *process, ModuleSP &module_sp,
- const FileSpecList *module_search_paths_ptr,
- ModuleSP *old_module_sp_ptr,
- bool *did_create_ptr) {
+Status Platform::GetSharedModule(
+ const ModuleSpec &module_spec, Process *process, ModuleSP &module_sp,
+ const FileSpecList *module_search_paths_ptr,
+ llvm::SmallVectorImpl<lldb::ModuleSP> *old_modules, bool *did_create_ptr) {
if (IsHost())
- return ModuleList::GetSharedModule(
- module_spec, module_sp, module_search_paths_ptr, old_module_sp_ptr,
- did_create_ptr, false);
+ return ModuleList::GetSharedModule(module_spec, module_sp,
+ module_search_paths_ptr, old_modules,
+ did_create_ptr, false);
// Module resolver lambda.
auto resolver = [&](const ModuleSpec &spec) {
@@ -239,17 +238,17 @@ Status Platform::GetSharedModule(const ModuleSpec &module_spec,
resolved_spec.GetFileSpec().PrependPathComponent(
m_sdk_sysroot.GetStringRef());
// Try to get shared module with resolved spec.
- error = ModuleList::GetSharedModule(
- resolved_spec, module_sp, module_search_paths_ptr, old_module_sp_ptr,
- did_create_ptr, false);
+ error = ModuleList::GetSharedModule(resolved_spec, module_sp,
+ module_search_paths_ptr, old_modules,
+ did_create_ptr, false);
}
// If we don't have sysroot or it didn't work then
// try original module spec.
if (!error.Success()) {
resolved_spec = spec;
- error = ModuleList::GetSharedModule(
- resolved_spec, module_sp, module_search_paths_ptr, old_module_sp_ptr,
- did_create_ptr, false);
+ error = ModuleList::GetSharedModule(resolved_spec, module_sp,
+ module_search_paths_ptr, old_modules,
+ did_create_ptr, false);
}
if (error.Success() && module_sp)
module_sp->SetPlatformFileSpec(resolved_spec.GetFileSpec());
@@ -1564,21 +1563,29 @@ Status Platform::GetRemoteSharedModule(const ModuleSpec &module_spec,
if (error.Success() && module_sp)
break;
}
- if (module_sp)
+ if (module_sp) {
+ resolved_module_spec = arch_module_spec;
got_module_spec = true;
+ }
}
if (!got_module_spec) {
// Get module information from a target.
- if (!GetModuleSpec(module_spec.GetFileSpec(), module_spec.GetArchitecture(),
- resolved_module_spec)) {
+ if (GetModuleSpec(module_spec.GetFileSpec(), module_spec.GetArchitecture(),
+ resolved_module_spec)) {
if (!module_spec.GetUUID().IsValid() ||
module_spec.GetUUID() == resolved_module_spec.GetUUID()) {
- return module_resolver(module_spec);
+ got_module_spec = true;
}
}
}
+ if (!got_module_spec) {
+ // Fall back to the given module resolver, which may have its own
+ // search logic.
+ return module_resolver(module_spec);
+ }
+
// If we are looking for a specific UUID, make sure resolved_module_spec has
// the same one before we search.
if (module_spec.GetUUID().IsValid()) {
diff --git a/contrib/llvm-project/lldb/source/Target/Target.cpp b/contrib/llvm-project/lldb/source/Target/Target.cpp
index 707344f99fcb..19d0c3d477eb 100644
--- a/contrib/llvm-project/lldb/source/Target/Target.cpp
+++ b/contrib/llvm-project/lldb/source/Target/Target.cpp
@@ -1965,8 +1965,9 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
module_sp = m_images.FindFirstModule(module_spec);
if (!module_sp) {
- ModuleSP old_module_sp; // This will get filled in if we have a new version
- // of the library
+ llvm::SmallVector<ModuleSP, 1>
+ old_modules; // This will get filled in if we have a new version
+ // of the library
bool did_create_module = false;
FileSpecList search_paths = GetExecutableSearchPaths();
// If there are image search path entries, try to use them first to acquire
@@ -1979,7 +1980,7 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
transformed_spec.GetFileSpec().GetFilename() =
module_spec.GetFileSpec().GetFilename();
error = ModuleList::GetSharedModule(transformed_spec, module_sp,
- &search_paths, &old_module_sp,
+ &search_paths, &old_modules,
&did_create_module);
}
}
@@ -1997,7 +1998,7 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
// We have a UUID, it is OK to check the global module list...
error =
ModuleList::GetSharedModule(module_spec, module_sp, &search_paths,
- &old_module_sp, &did_create_module);
+ &old_modules, &did_create_module);
}
if (!module_sp) {
@@ -2006,7 +2007,7 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
if (m_platform_sp) {
error = m_platform_sp->GetSharedModule(
module_spec, m_process_sp.get(), module_sp, &search_paths,
- &old_module_sp, &did_create_module);
+ &old_modules, &did_create_module);
} else {
error.SetErrorString("no platform is currently set");
}
@@ -2057,18 +2058,18 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
// this target. So let's remove the UUID from the module list, and look
// in the target's module list. Only do this if there is SOMETHING else
// in the module spec...
- if (!old_module_sp) {
- if (module_spec.GetUUID().IsValid() &&
- !module_spec.GetFileSpec().GetFilename().IsEmpty() &&
- !module_spec.GetFileSpec().GetDirectory().IsEmpty()) {
- ModuleSpec module_spec_copy(module_spec.GetFileSpec());
- module_spec_copy.GetUUID().Clear();
-
- ModuleList found_modules;
- m_images.FindModules(module_spec_copy, found_modules);
- if (found_modules.GetSize() == 1)
- old_module_sp = found_modules.GetModuleAtIndex(0);
- }
+ if (module_spec.GetUUID().IsValid() &&
+ !module_spec.GetFileSpec().GetFilename().IsEmpty() &&
+ !module_spec.GetFileSpec().GetDirectory().IsEmpty()) {
+ ModuleSpec module_spec_copy(module_spec.GetFileSpec());
+ module_spec_copy.GetUUID().Clear();
+
+ ModuleList found_modules;
+ m_images.FindModules(module_spec_copy, found_modules);
+ found_modules.ForEach([&](const ModuleSP &found_module) -> bool {
+ old_modules.push_back(found_module);
+ return true;
+ });
}
// Preload symbols outside of any lock, so hopefully we can do this for
@@ -2076,14 +2077,67 @@ ModuleSP Target::GetOrCreateModule(const ModuleSpec &module_spec, bool notify,
if (GetPreloadSymbols())
module_sp->PreloadSymbols();
- if (old_module_sp && m_images.GetIndexForModule(old_module_sp.get()) !=
- LLDB_INVALID_INDEX32) {
- m_images.ReplaceModule(old_module_sp, module_sp);
+ llvm::SmallVector<ModuleSP, 1> replaced_modules;
+ for (ModuleSP &old_module_sp : old_modules) {
+ if (m_images.GetIndexForModule(old_module_sp.get()) !=
+ LLDB_INVALID_INDEX32) {
+ if (replaced_modules.empty())
+ m_images.ReplaceModule(old_module_sp, module_sp);
+ else
+ m_images.Remove(old_module_sp);
+
+ replaced_modules.push_back(std::move(old_module_sp));
+ }
+ }
+
+ if (replaced_modules.size() > 1) {
+ // The same new module replaced multiple old modules
+ // simultaneously. It's not clear this should ever
+ // happen (if we always replace old modules as we add
+ // new ones, presumably we should never have more than
+ // one old one). If there are legitimate cases where
+ // this happens, then the ModuleList::Notifier interface
+ // may need to be adjusted to allow reporting this.
+ // In the meantime, just log that this has happened; just
+ // above we called ReplaceModule on the first one, and Remove
+ // on the rest.
+ if (Log *log = GetLogIfAnyCategoriesSet(LIBLLDB_LOG_TARGET |
+ LIBLLDB_LOG_MODULES)) {
+ StreamString message;
+ auto dump = [&message](Module &dump_module) -> void {
+ UUID dump_uuid = dump_module.GetUUID();
+
+ message << '[';
+ dump_module.GetDescription(message.AsRawOstream());
+ message << " (uuid ";
+
+ if (dump_uuid.IsValid())
+ dump_uuid.Dump(&message);
+ else
+ message << "not specified";
+
+ message << ")]";
+ };
+
+ message << "New module ";
+ dump(*module_sp);
+ message.AsRawOstream()
+ << llvm::formatv(" simultaneously replaced {0} old modules: ",
+ replaced_modules.size());
+ for (ModuleSP &replaced_module_sp : replaced_modules)
+ dump(*replaced_module_sp);
+
+ log->PutString(message.GetString());
+ }
+ }
+
+ if (replaced_modules.empty())
+ m_images.Append(module_sp, notify);
+
+ for (ModuleSP &old_module_sp : replaced_modules) {
Module *old_module_ptr = old_module_sp.get();
old_module_sp.reset();
ModuleList::RemoveSharedModuleIfOrphaned(old_module_ptr);
- } else {
- m_images.Append(module_sp, notify);
}
} else
module_sp.reset();
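
As a rough illustration of the replacement policy introduced above — replace the first old module still present in the image list (so listeners get a replace notification), remove any further matches, and append only if nothing was replaced — here is a minimal sketch with hypothetical container methods; it is not the LLDB ModuleList interface.

#include <vector>

template <typename ImageList, typename ModuleSP>
void ReplaceOldModules(ImageList &Images, std::vector<ModuleSP> &OldModules,
                       const ModuleSP &NewModule) {
  bool Replaced = false;
  for (ModuleSP &Old : OldModules) {
    if (!Images.Contains(Old)) // hypothetical membership test
      continue;
    if (!Replaced) {
      Images.Replace(Old, NewModule); // notify listeners of the swap
      Replaced = true;
    } else {
      Images.Remove(Old); // additional stale copies are simply dropped
    }
  }
  if (!Replaced)
    Images.Append(NewModule);
}
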
diff --git a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
index 30689d2274a8..81b21a7319a7 100644
--- a/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
+++ b/contrib/llvm-project/llvm/include/llvm/ADT/ImmutableMap.h
@@ -355,7 +355,7 @@ public:
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
- ID.AddPointer(M.Root);
+ ID.AddPointer(M.Root.get());
}
inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
diff --git a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
index bdcf10fd1640..21a5c26883cd 100644
--- a/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/contrib/llvm-project/llvm/include/llvm/BinaryFormat/ELF.h
@@ -405,6 +405,12 @@ enum {
#include "ELFRelocs/AArch64.def"
};
+// Special values for the st_other field in the symbol table entry for AArch64.
+enum {
+ // Symbol may follow different calling convention than base PCS.
+ STO_AARCH64_VARIANT_PCS = 0x80
+};
+
// ARM Specific e_flags
enum : unsigned {
EF_ARM_SOFT_FLOAT = 0x00000200U, // Legacy pre EABI_VER5
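
For illustration only, a consumer of the symbol table could test the new st_other flag like this, using the Elf64_Sym definition from the same header:

#include "llvm/BinaryFormat/ELF.h"

// A symbol marked with .variant_pcs may follow a calling convention other
// than the base AArch64 PCS.
static bool HasVariantPCS(const llvm::ELF::Elf64_Sym &Sym) {
  return (Sym.st_other & llvm::ELF::STO_AARCH64_VARIANT_PCS) != 0;
}
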
diff --git a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/DeadArgumentElimination.h b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/DeadArgumentElimination.h
index 73797bc10017..496ceea12bc9 100644
--- a/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/DeadArgumentElimination.h
+++ b/contrib/llvm-project/llvm/include/llvm/Transforms/IPO/DeadArgumentElimination.h
@@ -128,6 +128,7 @@ private:
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
void SurveyFunction(const Function &F);
+ bool IsLive(const RetOrArg &RA);
void MarkValue(const RetOrArg &RA, Liveness L,
const UseVector &MaybeLiveUses);
void MarkLive(const RetOrArg &RA);
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp
index 608fc0388af0..c3b039b05f30 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFDebugArangeSet.cpp
@@ -132,19 +132,20 @@ Error DWARFDebugArangeSet::extract(DWARFDataExtractor data,
uint64_t end_offset = Offset + full_length;
while (*offset_ptr < end_offset) {
+ uint64_t EntryOffset = *offset_ptr;
arangeDescriptor.Address = data.getUnsigned(offset_ptr, HeaderData.AddrSize);
arangeDescriptor.Length = data.getUnsigned(offset_ptr, HeaderData.AddrSize);
- if (arangeDescriptor.Length == 0) {
- // Each set of tuples is terminated by a 0 for the address and 0
- // for the length.
- if (arangeDescriptor.Address == 0 && *offset_ptr == end_offset)
+ // Each set of tuples is terminated by a 0 for the address and 0
+ // for the length.
+ if (arangeDescriptor.Length == 0 && arangeDescriptor.Address == 0) {
+ if (*offset_ptr == end_offset)
return ErrorSuccess();
return createStringError(
errc::invalid_argument,
"address range table at offset 0x%" PRIx64
- " has an invalid tuple (length = 0) at offset 0x%" PRIx64,
- Offset, *offset_ptr - tuple_size);
+ " has a premature terminator entry at offset 0x%" PRIx64,
+ Offset, EntryOffset);
}
ArangeDescriptors.push_back(arangeDescriptor);
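
The terminator rule the new error message enforces — a (0, 0) address/length pair is only valid as the very last tuple of the set — can be sketched independently of the DWARF reader; the types below are placeholders, not the parser's own.

#include <cstdint>
#include <utility>
#include <vector>

// Returns true on a well-formed set, false on a premature (0, 0) terminator
// or a set with no terminator at all.
static bool ReadArangeTuples(
    const std::vector<std::pair<uint64_t, uint64_t>> &Raw,
    std::vector<std::pair<uint64_t, uint64_t>> &Out) {
  for (size_t I = 0; I < Raw.size(); ++I) {
    if (Raw[I].first == 0 && Raw[I].second == 0)
      return I + 1 == Raw.size(); // terminator must be the final entry
    Out.push_back(Raw[I]);
  }
  return false;
}
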
diff --git a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFUnitIndex.cpp b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFUnitIndex.cpp
index 3d4cecce27db..d27fd08db14e 100644
--- a/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFUnitIndex.cpp
+++ b/contrib/llvm-project/llvm/lib/DebugInfo/DWARF/DWARFUnitIndex.cpp
@@ -286,10 +286,14 @@ const DWARFUnitIndex::Entry *DWARFUnitIndex::getFromHash(uint64_t S) const {
auto H = S & Mask;
auto HP = ((S >> 32) & Mask) | 1;
- while (Rows[H].getSignature() != S && Rows[H].getSignature() != 0)
+ // The spec says "while 0 is a valid hash value, the row index in a used slot
+ // will always be non-zero". Loop until we find a match or an empty slot.
+ while (Rows[H].getSignature() != S && Rows[H].Index != nullptr)
H = (H + HP) & Mask;
- if (Rows[H].getSignature() != S)
+ // If the slot is empty, we don't care whether the signature matches (it could
+ // be zero and still match the zeros in the empty slot).
+ if (Rows[H].Index == nullptr)
return nullptr;
return &Rows[H];
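
A stripped-down sketch of the probing scheme described in the comments above — double hashing over a power-of-two table, with an empty slot signalled by a null Index pointer rather than a zero signature — using placeholder types:

#include <cstdint>

struct Entry {
  uint64_t Signature = 0;
  const void *Index = nullptr; // null means "slot unused"
};

// Mask is TableSize - 1, with TableSize a power of two.
static const Entry *Lookup(const Entry *Rows, uint64_t Mask, uint64_t S) {
  uint64_t H = S & Mask;
  uint64_t HP = ((S >> 32) & Mask) | 1; // odd step, so every slot is reachable
  while (Rows[H].Signature != S && Rows[H].Index != nullptr)
    H = (H + HP) & Mask;
  // An empty slot means "not found" even if its zero signature matches S.
  return Rows[H].Index ? &Rows[H] : nullptr;
}
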
diff --git a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
index 7e9b0690ccea..04f541b59557 100644
--- a/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/contrib/llvm-project/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -308,7 +308,9 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
<< " SID: " << SectionID
<< " Offset: " << format("%p", (uintptr_t)Addr)
<< " flags: " << *FlagsOrErr << "\n");
- GlobalSymbolTable[Name] = SymbolTableEntry(SectionID, Addr, *JITSymFlags);
+ if (!Name.empty()) // Skip absolute symbol relocations.
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, Addr, *JITSymFlags);
} else if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data ||
SymType == object::SymbolRef::ST_Unknown ||
@@ -340,8 +342,9 @@ RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
<< " SID: " << SectionID
<< " Offset: " << format("%p", (uintptr_t)SectOffset)
<< " flags: " << *FlagsOrErr << "\n");
- GlobalSymbolTable[Name] =
- SymbolTableEntry(SectionID, SectOffset, *JITSymFlags);
+ if (!Name.empty()) // Skip absolute symbol relocations
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, SectOffset, *JITSymFlags);
}
}
@@ -769,8 +772,9 @@ Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
<< format("%p", Addr) << "\n");
- GlobalSymbolTable[Name] =
- SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
+ if (!Name.empty()) // Skip absolute symbol relocations.
+ GlobalSymbolTable[Name] =
+ SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
Offset += Size;
Addr += Size;
}
@@ -930,6 +934,8 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
if (Loc == GlobalSymbolTable.end()) {
ExternalSymbolRelocations[SymbolName].push_back(RE);
} else {
+ assert(!SymbolName.empty() &&
+ "Empty symbol should not be in GlobalSymbolTable");
// Copy the RE since we want to modify its addend.
RelocationEntry RECopy = RE;
const auto &SymInfo = Loc->second;
@@ -1234,7 +1240,8 @@ void RuntimeDyldImpl::finalizeAsync(
for (auto &RelocKV : SharedThis->ExternalSymbolRelocations) {
StringRef Name = RelocKV.first();
- assert(!Name.empty() && "Symbol has no name?");
+ if (Name.empty()) // Skip absolute symbol relocations.
+ continue;
assert(!SharedThis->GlobalSymbolTable.count(Name) &&
"Name already processed. RuntimeDyld instances can not be re-used "
"when finalizing with finalizeAsync.");
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
index f3c3e9ad9f69..c20d0955f3d8 100644
--- a/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
@@ -1589,7 +1589,7 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
const GlobalValue *GV2) {
auto isGlobalUnsafeForEquality = [](const GlobalValue *GV) {
- if (GV->hasExternalWeakLinkage() || GV->hasWeakAnyLinkage())
+ if (GV->isInterposable() || GV->hasGlobalUnnamedAddr())
return true;
if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
Type *Ty = GVar->getValueType();
diff --git a/contrib/llvm-project/llvm/lib/MC/MCObjectFileInfo.cpp b/contrib/llvm-project/llvm/lib/MC/MCObjectFileInfo.cpp
index b77a9635f64c..b9b4416fde21 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -317,6 +317,8 @@ void MCObjectFileInfo::initELFMCObjectFileInfo(const Triple &T, bool Large) {
break;
case Triple::ppc64:
case Triple::ppc64le:
+ case Triple::aarch64:
+ case Triple::aarch64_be:
case Triple::x86_64:
FDECFIEncoding = dwarf::DW_EH_PE_pcrel |
(Large ? dwarf::DW_EH_PE_sdata8 : dwarf::DW_EH_PE_sdata4);
diff --git a/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
index e5ab13bc719d..fb8215ef2281 100644
--- a/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -644,10 +644,13 @@ EndStmt:
!(SectionName == ".eh_frame" && Type == ELF::SHT_PROGBITS))
Error(loc, "changed section type for " + SectionName + ", expected: 0x" +
utohexstr(Section->getType()));
- if (Section->getFlags() != Flags)
+ // Check that flags are used consistently. However, the GNU assembler permits
+ // them to be omitted in subsequent uses of the same section; for
+ // compatibility, do likewise.
+ if ((Flags || Size || !TypeName.empty()) && Section->getFlags() != Flags)
Error(loc, "changed section flags for " + SectionName + ", expected: 0x" +
utohexstr(Section->getFlags()));
- if (Section->getEntrySize() != Size)
+ if ((Flags || Size || !TypeName.empty()) && Section->getEntrySize() != Size)
Error(loc, "changed section entsize for " + SectionName +
", expected: " + Twine(Section->getEntrySize()));
diff --git a/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp b/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
index e945e8cecce9..4594368fc0e9 100644
--- a/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
+++ b/contrib/llvm-project/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -243,6 +243,8 @@ void LSUnit::onInstructionExecuted(const InstRef &IR) {
CurrentStoreGroupID = 0;
if (GroupID == CurrentLoadBarrierGroupID)
CurrentLoadBarrierGroupID = 0;
+ if (GroupID == CurrentStoreBarrierGroupID)
+ CurrentStoreBarrierGroupID = 0;
}
}
diff --git a/contrib/llvm-project/llvm/lib/Support/Host.cpp b/contrib/llvm-project/llvm/lib/Support/Host.cpp
index 658c1ee74cfe..36cecf9b2a16 100644
--- a/contrib/llvm-project/llvm/lib/Support/Host.cpp
+++ b/contrib/llvm-project/llvm/lib/Support/Host.cpp
@@ -760,14 +760,15 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Type = X86::INTEL_GOLDMONT_PLUS;
break;
case 0x86:
+ CPU = "tremont";
*Type = X86::INTEL_TREMONT;
break;
+ // Xeon Phi (Knights Landing + Knights Mill):
case 0x57:
- CPU = "tremont";
+ CPU = "knl";
*Type = X86::INTEL_KNL;
break;
-
case 0x85:
CPU = "knm";
*Type = X86::INTEL_KNM;
diff --git a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
index e352beb77616..a4ffc0ec4313 100644
--- a/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
+++ b/contrib/llvm-project/llvm/lib/Support/Windows/Path.inc
@@ -19,7 +19,6 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/WindowsError.h"
#include <fcntl.h>
-#include <io.h>
#include <sys/stat.h>
#include <sys/types.h>
@@ -352,13 +351,13 @@ std::error_code is_local(const Twine &path, bool &result) {
static std::error_code realPathFromHandle(HANDLE H,
SmallVectorImpl<wchar_t> &Buffer) {
DWORD CountChars = ::GetFinalPathNameByHandleW(
- H, Buffer.begin(), Buffer.capacity() - 1, FILE_NAME_NORMALIZED);
- if (CountChars > Buffer.capacity()) {
+ H, Buffer.begin(), Buffer.capacity(), FILE_NAME_NORMALIZED);
+ if (CountChars && CountChars >= Buffer.capacity()) {
// The buffer wasn't big enough, try again. In this case the return value
// *does* indicate the size of the null terminator.
Buffer.reserve(CountChars);
CountChars = ::GetFinalPathNameByHandleW(
- H, Buffer.data(), Buffer.capacity() - 1, FILE_NAME_NORMALIZED);
+ H, Buffer.begin(), Buffer.capacity(), FILE_NAME_NORMALIZED);
}
if (CountChars == 0)
return mapWindowsError(GetLastError());
@@ -403,6 +402,20 @@ std::error_code is_local(int FD, bool &Result) {
}
static std::error_code setDeleteDisposition(HANDLE Handle, bool Delete) {
+ // First, check if the file is on a network (non-local) drive. If so, don't
+ // set DeleteFile to true, since it prevents opening the file for writes.
+ SmallVector<wchar_t, 128> FinalPath;
+ if (std::error_code EC = realPathFromHandle(Handle, FinalPath))
+ return EC;
+
+ bool IsLocal;
+ if (std::error_code EC = is_local_internal(FinalPath, IsLocal))
+ return EC;
+
+ if (!IsLocal)
+ return std::error_code();
+
+ // The file is on a local drive; set DeleteFile to true.
FILE_DISPOSITION_INFO Disposition;
Disposition.DeleteFile = Delete;
if (!SetFileInformationByHandle(Handle, FileDispositionInfo, &Disposition,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 3a94820dac8d..7ec7ffe309f7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -89,6 +89,8 @@ public:
void emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned JTI);
+ void emitFunctionEntryLabel() override;
+
void LowerJumpTableDestSmall(MCStreamer &OutStreamer, const MachineInstr &MI);
void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
@@ -822,6 +824,19 @@ void AArch64AsmPrinter::emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
OutStreamer->emitValue(Value, Size);
}
+void AArch64AsmPrinter::emitFunctionEntryLabel() {
+ if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
+ MF->getFunction().getCallingConv() ==
+ CallingConv::AArch64_SVE_VectorCall ||
+ STI->getRegisterInfo()->hasSVEArgsOrReturn(MF)) {
+ auto *TS =
+ static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
+ TS->emitDirectiveVariantPCS(CurrentFnSym);
+ }
+
+ return AsmPrinter::emitFunctionEntryLabel();
+}
+
/// Small jump tables contain an unsigned byte or half, representing the offset
/// from the lowest-addressed possible destination to the desired basic
/// block. Since all instructions are 4-byte aligned, this is further compressed
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 84ec5afcc9c1..9ae2b465e247 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -35,6 +35,9 @@ static const MCPhysReg DRegList[] = {AArch64::D0, AArch64::D1, AArch64::D2,
static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,
AArch64::Q3, AArch64::Q4, AArch64::Q5,
AArch64::Q6, AArch64::Q7};
+static const MCPhysReg ZRegList[] = {AArch64::Z0, AArch64::Z1, AArch64::Z2,
+ AArch64::Z3, AArch64::Z4, AArch64::Z5,
+ AArch64::Z6, AArch64::Z7};
static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
@@ -97,6 +100,8 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
RegList = DRegList;
else if (LocVT.SimpleTy == MVT::f128 || LocVT.is128BitVector())
RegList = QRegList;
+ else if (LocVT.isScalableVector())
+ RegList = ZRegList;
else {
// Not an array we want to split up after all.
return false;
@@ -141,6 +146,10 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
return true;
}
+ if (LocVT.isScalableVector())
+ report_fatal_error(
+ "Passing consecutive scalable vector registers unsupported");
+
// Mark all regs in the class as unavailable
for (auto Reg : RegList)
State.AllocateReg(Reg);
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 45bfa85bdc07..48ca9039b1bd 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4366,6 +4366,10 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Outs[i].VT;
+ if (!Outs[i].IsFixed && ArgVT.isScalableVector())
+ report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
/*IsVarArg=*/ !Outs[i].IsFixed);
@@ -6168,6 +6172,10 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
Chain = VAList.getValue(1);
VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
+ if (VT.isScalableVector())
+ report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+
if (Align && *Align > MinSlotSize) {
VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
DAG.getConstant(Align->value() - 1, DL, PtrVT));
@@ -14702,7 +14710,14 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
- return Ty->isArrayTy();
+ if (Ty->isArrayTy())
+ return true;
+
+ const TypeSize &TySize = Ty->getPrimitiveSizeInBits();
+ if (TySize.isScalable() && TySize.getKnownMinSize() > 128)
+ return true;
+
+ return false;
}
bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 0ac09c4f96f0..e72ae0e62cb7 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -179,6 +179,8 @@ private:
bool parseDirectiveCFINegateRAState();
bool parseDirectiveCFIBKeyFrame();
+ bool parseDirectiveVariantPCS(SMLoc L);
+
bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
SmallVectorImpl<SMLoc> &Loc);
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
@@ -5077,6 +5079,8 @@ bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
parseDirectiveCFIBKeyFrame();
else if (IDVal == ".arch_extension")
parseDirectiveArchExtension(Loc);
+ else if (IDVal == ".variant_pcs")
+ parseDirectiveVariantPCS(Loc);
else if (IsMachO) {
if (IDVal == MCLOHDirectiveName())
parseDirectiveLOH(IDVal, Loc);
@@ -5507,6 +5511,32 @@ bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
return false;
}
+/// parseDirectiveVariantPCS
+/// ::= .variant_pcs symbolname
+bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
+ MCAsmParser &Parser = getParser();
+
+ const AsmToken &Tok = Parser.getTok();
+ if (Tok.isNot(AsmToken::Identifier))
+ return TokError("expected symbol name");
+
+ StringRef SymbolName = Tok.getIdentifier();
+
+ MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
+ if (!Sym)
+ return TokError("unknown symbol in '.variant_pcs' directive");
+
+ Parser.Lex(); // Eat the symbol
+
+ // Shouldn't be any more tokens
+ if (parseToken(AsmToken::EndOfStatement))
+ return addErrorSuffix(" in '.variant_pcs' directive");
+
+ getTargetStreamer().emitDirectiveVariantPCS(Sym);
+
+ return false;
+}
+
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
AArch64MCExpr::VariantKind &ELFRefKind,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 408f0cb77e73..7733fe7f7b24 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -289,14 +289,15 @@ private:
getExtendTypeForInst(MachineInstr &MI, MachineRegisterInfo &MRI,
bool IsLoadStore = false) const;
- /// Instructions that accept extend modifiers like UXTW expect the register
- /// being extended to be a GPR32. Narrow ExtReg to a 32-bit register using a
- /// subregister copy if necessary. Return either ExtReg, or the result of the
- /// new copy.
- Register narrowExtendRegIfNeeded(Register ExtReg,
- MachineIRBuilder &MIB) const;
- Register widenGPRBankRegIfNeeded(Register Reg, unsigned Size,
- MachineIRBuilder &MIB) const;
+ /// Move \p Reg to \p RC if \p Reg is not already on \p RC.
+ ///
+ /// \returns Either \p Reg if no change was necessary, or the new register
+ /// created by moving \p Reg.
+ ///
+ /// Note: This uses emitCopy right now.
+ Register moveScalarRegClass(Register Reg, const TargetRegisterClass &RC,
+ MachineIRBuilder &MIB) const;
+
ComplexRendererFns selectArithExtendedRegister(MachineOperand &Root) const;
void renderTruncImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
@@ -1195,10 +1196,10 @@ MachineInstr *AArch64InstructionSelector::emitTestBit(
// TBNZW work.
bool UseWReg = Bit < 32;
unsigned NecessarySize = UseWReg ? 32 : 64;
- if (Size < NecessarySize)
- TestReg = widenGPRBankRegIfNeeded(TestReg, NecessarySize, MIB);
- else if (Size > NecessarySize)
- TestReg = narrowExtendRegIfNeeded(TestReg, MIB);
+ if (Size != NecessarySize)
+ TestReg = moveScalarRegClass(
+ TestReg, UseWReg ? AArch64::GPR32RegClass : AArch64::GPR64RegClass,
+ MIB);
static const unsigned OpcTable[2][2] = {{AArch64::TBZX, AArch64::TBNZX},
{AArch64::TBZW, AArch64::TBNZW}};
@@ -4904,9 +4905,19 @@ AArch64InstructionSelector::selectExtendedSHL(
return None;
unsigned OffsetOpc = OffsetInst->getOpcode();
- if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL)
- return None;
+ bool LookedThroughZExt = false;
+ if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL) {
+ // Try to look through a ZEXT.
+ if (OffsetOpc != TargetOpcode::G_ZEXT || !WantsExt)
+ return None;
+
+ OffsetInst = MRI.getVRegDef(OffsetInst->getOperand(1).getReg());
+ OffsetOpc = OffsetInst->getOpcode();
+ LookedThroughZExt = true;
+ if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL)
+ return None;
+ }
// Make sure that the memory op is a valid size.
int64_t LegalShiftVal = Log2_32(SizeInBytes);
if (LegalShiftVal == 0)
@@ -4957,21 +4968,24 @@ AArch64InstructionSelector::selectExtendedSHL(
unsigned SignExtend = 0;
if (WantsExt) {
- // Check if the offset is defined by an extend.
- MachineInstr *ExtInst = getDefIgnoringCopies(OffsetReg, MRI);
- auto Ext = getExtendTypeForInst(*ExtInst, MRI, true);
- if (Ext == AArch64_AM::InvalidShiftExtend)
- return None;
+ // Check if the offset is defined by an extend, unless we looked through a
+ // G_ZEXT earlier.
+ if (!LookedThroughZExt) {
+ MachineInstr *ExtInst = getDefIgnoringCopies(OffsetReg, MRI);
+ auto Ext = getExtendTypeForInst(*ExtInst, MRI, true);
+ if (Ext == AArch64_AM::InvalidShiftExtend)
+ return None;
- SignExtend = isSignExtendShiftType(Ext) ? 1 : 0;
- // We only support SXTW for signed extension here.
- if (SignExtend && Ext != AArch64_AM::SXTW)
- return None;
+ SignExtend = isSignExtendShiftType(Ext) ? 1 : 0;
+ // We only support SXTW for signed extension here.
+ if (SignExtend && Ext != AArch64_AM::SXTW)
+ return None;
+ OffsetReg = ExtInst->getOperand(1).getReg();
+ }
// Need a 32-bit wide register here.
MachineIRBuilder MIB(*MRI.getVRegDef(Root.getReg()));
- OffsetReg = ExtInst->getOperand(1).getReg();
- OffsetReg = narrowExtendRegIfNeeded(OffsetReg, MIB);
+ OffsetReg = moveScalarRegClass(OffsetReg, AArch64::GPR32RegClass, MIB);
}
// We can use the LHS of the GEP as the base, and the LHS of the shift as an
@@ -5143,8 +5157,8 @@ AArch64InstructionSelector::selectAddrModeWRO(MachineOperand &Root,
// Need a 32-bit wide register.
MachineIRBuilder MIB(*PtrAdd);
- Register ExtReg =
- narrowExtendRegIfNeeded(OffsetInst->getOperand(1).getReg(), MIB);
+ Register ExtReg = moveScalarRegClass(OffsetInst->getOperand(1).getReg(),
+ AArch64::GPR32RegClass, MIB);
unsigned SignExtend = Ext == AArch64_AM::SXTW;
// Base is LHS, offset is ExtReg.
@@ -5418,67 +5432,21 @@ AArch64_AM::ShiftExtendType AArch64InstructionSelector::getExtendTypeForInst(
}
}
-Register AArch64InstructionSelector::narrowExtendRegIfNeeded(
- Register ExtReg, MachineIRBuilder &MIB) const {
+Register AArch64InstructionSelector::moveScalarRegClass(
+ Register Reg, const TargetRegisterClass &RC, MachineIRBuilder &MIB) const {
MachineRegisterInfo &MRI = *MIB.getMRI();
- if (MRI.getType(ExtReg).getSizeInBits() == 32)
- return ExtReg;
-
- // Insert a copy to move ExtReg to GPR32.
- Register NarrowReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
- auto Copy = MIB.buildCopy({NarrowReg}, {ExtReg});
+ auto Ty = MRI.getType(Reg);
+ assert(!Ty.isVector() && "Expected scalars only!");
+ if (Ty.getSizeInBits() == TRI.getRegSizeInBits(RC))
+ return Reg;
- // Select the copy into a subregister copy.
+ // Create a copy and immediately select it.
+ // FIXME: We should have an emitCopy function?
+ auto Copy = MIB.buildCopy({&RC}, {Reg});
selectCopy(*Copy, TII, MRI, TRI, RBI);
return Copy.getReg(0);
}
-Register AArch64InstructionSelector::widenGPRBankRegIfNeeded(
- Register Reg, unsigned WideSize, MachineIRBuilder &MIB) const {
- assert(WideSize >= 8 && "WideSize is smaller than all possible registers?");
- MachineRegisterInfo &MRI = *MIB.getMRI();
- unsigned NarrowSize = MRI.getType(Reg).getSizeInBits();
- assert(WideSize >= NarrowSize &&
- "WideSize cannot be smaller than NarrowSize!");
-
- // If the sizes match, just return the register.
- //
- // If NarrowSize is an s1, then we can select it to any size, so we'll treat
- // it as a don't care.
- if (NarrowSize == WideSize || NarrowSize == 1)
- return Reg;
-
- // Now check the register classes.
- const RegisterBank *RB = RBI.getRegBank(Reg, MRI, TRI);
- const TargetRegisterClass *OrigRC = getMinClassForRegBank(*RB, NarrowSize);
- const TargetRegisterClass *WideRC = getMinClassForRegBank(*RB, WideSize);
- assert(OrigRC && "Could not determine narrow RC?");
- assert(WideRC && "Could not determine wide RC?");
-
- // If the sizes differ, but the register classes are the same, there is no
- // need to insert a SUBREG_TO_REG.
- //
- // For example, an s8 that's supposed to be a GPR will be selected to either
- // a GPR32 or a GPR64 register. Note that this assumes that the s8 will
- // always end up on a GPR32.
- if (OrigRC == WideRC)
- return Reg;
-
- // We have two different register classes. Insert a SUBREG_TO_REG.
- unsigned SubReg = 0;
- getSubRegForClass(OrigRC, TRI, SubReg);
- assert(SubReg && "Couldn't determine subregister?");
-
- // Build the SUBREG_TO_REG and return the new, widened register.
- auto SubRegToReg =
- MIB.buildInstr(AArch64::SUBREG_TO_REG, {WideRC}, {})
- .addImm(0)
- .addUse(Reg)
- .addImm(SubReg);
- constrainSelectedInstRegOperands(*SubRegToReg, TII, TRI, RBI);
- return SubRegToReg.getReg(0);
-}
-
/// Select an "extended register" operand. This operand folds in an extend
/// followed by an optional left shift.
InstructionSelector::ComplexRendererFns
@@ -5539,7 +5507,7 @@ AArch64InstructionSelector::selectArithExtendedRegister(
// We require a GPR32 here. Narrow the ExtReg if needed using a subregister
// copy.
MachineIRBuilder MIB(*RootDef);
- ExtReg = narrowExtendRegIfNeeded(ExtReg, MIB);
+ ExtReg = moveScalarRegClass(ExtReg, AArch64::GPR32RegClass, MIB);
return {{[=](MachineInstrBuilder &MIB) { MIB.addUse(ExtReg); },
[=](MachineInstrBuilder &MIB) {
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 2eaec0b970fa..4ffde2a7e3c4 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -97,15 +97,25 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
.moreElementsToNextPow2(0);
getActionDefinitionsBuilder(G_SHL)
- .legalFor({{s32, s32}, {s64, s64},
- {v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
- .clampScalar(1, s32, s64)
- .clampScalar(0, s32, s64)
- .widenScalarToNextPow2(0)
- .clampNumElements(0, v2s32, v4s32)
- .clampNumElements(0, v2s64, v2s64)
- .moreElementsToNextPow2(0)
- .minScalarSameAs(1, 0);
+ .customIf([=](const LegalityQuery &Query) {
+ const auto &SrcTy = Query.Types[0];
+ const auto &AmtTy = Query.Types[1];
+ return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
+ AmtTy.getSizeInBits() == 32;
+ })
+ .legalFor({{s32, s32},
+ {s64, s64},
+ {s32, s64},
+ {v2s32, v2s32},
+ {v4s32, v4s32},
+ {v2s64, v2s64}})
+ .clampScalar(1, s32, s64)
+ .clampScalar(0, s32, s64)
+ .widenScalarToNextPow2(0)
+ .clampNumElements(0, v2s32, v4s32)
+ .clampNumElements(0, v2s64, v2s64)
+ .moreElementsToNextPow2(0)
+ .minScalarSameAs(1, 0);
getActionDefinitionsBuilder(G_PTR_ADD)
.legalFor({{p0, s64}, {v2p0, v2s64}})
@@ -710,16 +720,14 @@ bool AArch64LegalizerInfo::legalizeShlAshrLshr(
// If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
// imported patterns can select it later. Either way, it will be legal.
Register AmtReg = MI.getOperand(2).getReg();
- auto *CstMI = MRI.getVRegDef(AmtReg);
- assert(CstMI && "expected to find a vreg def");
- if (CstMI->getOpcode() != TargetOpcode::G_CONSTANT)
+ auto VRegAndVal = getConstantVRegValWithLookThrough(AmtReg, MRI);
+ if (!VRegAndVal)
return true;
// Check the shift amount is in range for an immediate form.
- unsigned Amount = CstMI->getOperand(1).getCImm()->getZExtValue();
+ int64_t Amount = VRegAndVal->Value;
if (Amount > 31)
return true; // This will have to remain a register variant.
- assert(MRI.getType(AmtReg).getSizeInBits() == 32);
- auto ExtCst = MIRBuilder.buildZExt(LLT::scalar(64), AmtReg);
+ auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
MI.getOperand(2).setReg(ExtCst.getReg(0));
return true;
}
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 7e3ff1948dad..93213f5977e5 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -261,6 +261,7 @@ AArch64RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
case AArch64::GPR64common_and_GPR64noipRegClassID:
case AArch64::GPR64noip_and_tcGPR64RegClassID:
case AArch64::tcGPR64RegClassID:
+ case AArch64::rtcGPR64RegClassID:
case AArch64::WSeqPairsClassRegClassID:
case AArch64::XSeqPairsClassRegClassID:
return getRegBank(AArch64::GPRRegBankID);
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index fe4c34be1519..6dfda8217628 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -47,6 +47,10 @@ class AArch64TargetAsmStreamer : public AArch64TargetStreamer {
void emitInst(uint32_t Inst) override;
+ void emitDirectiveVariantPCS(MCSymbol *Symbol) override {
+ OS << "\t.variant_pcs " << Symbol->getName() << "\n";
+ }
+
public:
AArch64TargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
};
@@ -194,6 +198,10 @@ void AArch64TargetELFStreamer::emitInst(uint32_t Inst) {
getStreamer().emitInst(Inst);
}
+void AArch64TargetELFStreamer::emitDirectiveVariantPCS(MCSymbol *Symbol) {
+ cast<MCSymbolELF>(Symbol)->setOther(ELF::STO_AARCH64_VARIANT_PCS);
+}
+
MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
MCInstPrinter *InstPrint,
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
index 3a0c5d8318dd..1af978a806d1 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/AArch64TargetStreamer.h
@@ -36,6 +36,9 @@ public:
/// Callback used to implement the .inst directive.
virtual void emitInst(uint32_t Inst);
+ /// Callback used to implement the .variant_pcs directive.
+ virtual void emitDirectiveVariantPCS(MCSymbol *Symbol) {};
+
virtual void EmitARM64WinCFIAllocStack(unsigned Size) {}
virtual void EmitARM64WinCFISaveFPLR(int Offset) {}
virtual void EmitARM64WinCFISaveFPLRX(int Offset) {}
@@ -63,6 +66,7 @@ private:
AArch64ELFStreamer &getStreamer();
void emitInst(uint32_t Inst) override;
+ void emitDirectiveVariantPCS(MCSymbol *Symbol) override;
public:
AArch64TargetELFStreamer(MCStreamer &S) : AArch64TargetStreamer(S) {}
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index f31c722db1b2..442be886a8ac 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -254,16 +254,24 @@ bool SIPreEmitPeephole::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF) {
MachineBasicBlock::iterator MBBE = MBB.getFirstTerminator();
- if (MBBE != MBB.end()) {
- MachineInstr &MI = *MBBE;
+ MachineBasicBlock::iterator TermI = MBBE;
+ // Check first terminator for VCC branches to optimize
+ if (TermI != MBB.end()) {
+ MachineInstr &MI = *TermI;
switch (MI.getOpcode()) {
case AMDGPU::S_CBRANCH_VCCZ:
case AMDGPU::S_CBRANCH_VCCNZ:
Changed |= optimizeVccBranch(MI);
continue;
- case AMDGPU::SI_RETURN_TO_EPILOG:
- // FIXME: This is not an optimization and should be
- // moved somewhere else.
+ default:
+ break;
+ }
+ }
+ // Check all terminators for SI_RETURN_TO_EPILOG
+ // FIXME: This is not an optimization and should be moved somewhere else.
+ while (TermI != MBB.end()) {
+ MachineInstr &MI = *TermI;
+ if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());
// Graphics shaders returning non-void shouldn't contain S_ENDPGM,
@@ -281,11 +289,11 @@ bool SIPreEmitPeephole::runOnMachineFunction(MachineFunction &MF) {
.addMBB(EmptyMBBAtEnd);
MI.eraseFromParent();
MBBE = MBB.getFirstTerminator();
+ TermI = MBBE;
+ continue;
}
- break;
- default:
- break;
}
+ TermI++;
}
if (!ST.hasVGPRIndexMode())
diff --git a/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index d407edfbd966..77f565fb5957 100644
--- a/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -254,7 +254,7 @@ void BPFDAGToDAGISel::PreprocessLoad(SDNode *Node,
const LoadSDNode *LD = cast<LoadSDNode>(Node);
uint64_t size = LD->getMemOperand()->getSize();
- if (!size || size > 8 || (size & (size - 1)))
+ if (!size || size > 8 || (size & (size - 1)) || !LD->isSimple())
return;
SDNode *LDAddrNode = LD->getOperand(1).getNode();
@@ -342,7 +342,7 @@ bool BPFDAGToDAGISel::getConstantFieldValue(const GlobalAddressSDNode *Node,
unsigned char *ByteSeq) {
const GlobalVariable *V = dyn_cast<GlobalVariable>(Node->getGlobal());
- if (!V || !V->hasInitializer())
+ if (!V || !V->hasInitializer() || !V->isConstant())
return false;
const Constant *Init = V->getInitializer();
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonFeatures.td b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonFeatures.td
index 75273eff1868..75273eff1868 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonFeatures.td
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonFeatures.td
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.cpp
index e9d3aaeb9cfe..e9d3aaeb9cfe 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.cpp
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.h b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.h
index b165bc93780f..b165bc93780f 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.h
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/LeonPasses.h
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
index 069e43c6f544..7845a18b14c1 100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -351,7 +351,7 @@ void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
break;
case MachineOperand::MO_Immediate:
- O << (int)MO.getImm();
+ O << MO.getImm();
break;
case MachineOperand::MO_MachineBasicBlock:
MO.getMBB()->getSymbol()->print(O, MAI);
diff --git a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcSchedule.td b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcSchedule.td
index 0f05372b7050..0f05372b7050 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/Sparc/SparcSchedule.td
+++ b/contrib/llvm-project/llvm/lib/Target/Sparc/SparcSchedule.td
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 8a0092a3f298..c2a0d3e01740 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -58,6 +58,9 @@ class WebAssemblyFastISel final : public FastISel {
int FI;
} Base;
+ // Whether the base has been determined yet
+ bool IsBaseSet = false;
+
int64_t Offset = 0;
const GlobalValue *GV = nullptr;
@@ -74,8 +77,9 @@ class WebAssemblyFastISel final : public FastISel {
bool isFIBase() const { return Kind == FrameIndexBase; }
void setReg(unsigned Reg) {
assert(isRegBase() && "Invalid base register access!");
- assert(Base.Reg == 0 && "Overwriting non-zero register");
+ assert(!IsBaseSet && "Base cannot be reset");
Base.Reg = Reg;
+ IsBaseSet = true;
}
unsigned getReg() const {
assert(isRegBase() && "Invalid base register access!");
@@ -83,8 +87,9 @@ class WebAssemblyFastISel final : public FastISel {
}
void setFI(unsigned FI) {
assert(isFIBase() && "Invalid base frame index access!");
- assert(Base.FI == 0 && "Overwriting non-zero frame index");
+ assert(!IsBaseSet && "Base cannot be reset");
Base.FI = FI;
+ IsBaseSet = true;
}
unsigned getFI() const {
assert(isFIBase() && "Invalid base frame index access!");
@@ -98,13 +103,7 @@ class WebAssemblyFastISel final : public FastISel {
int64_t getOffset() const { return Offset; }
void setGlobalValue(const GlobalValue *G) { GV = G; }
const GlobalValue *getGlobalValue() const { return GV; }
- bool isSet() const {
- if (isRegBase()) {
- return Base.Reg != 0;
- } else {
- return Base.FI != 0;
- }
- }
+ bool isSet() const { return IsBaseSet; }
};
/// Keep a pointer to the WebAssemblySubtarget around so that we can make the
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
index 5ff0d73534a6..085910f01ee6 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -328,7 +328,9 @@ defm CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm),
} // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1
def : Pat<(i32 (WebAssemblywrapper tglobaladdr:$addr)),
- (CONST_I32 tglobaladdr:$addr)>, Requires<[IsNotPIC]>;
+ (CONST_I32 tglobaladdr:$addr)>, Requires<[IsNotPIC, HasAddr32]>;
+def : Pat<(i64 (WebAssemblywrapper tglobaladdr:$addr)),
+ (CONST_I64 tglobaladdr:$addr)>, Requires<[IsNotPIC, HasAddr64]>;
def : Pat<(i32 (WebAssemblywrapper tglobaladdr:$addr)),
(GLOBAL_GET_I32 tglobaladdr:$addr)>, Requires<[IsPIC]>;
diff --git a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
index 130589c9df8c..6b6394a58339 100644
--- a/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
@@ -101,10 +101,12 @@ void WebAssemblyRegisterInfo::eliminateFrameIndex(
WebAssemblyFrameLowering::getOpcConst(MF) &&
MRI.hasOneNonDBGUse(Def->getOperand(0).getReg())) {
MachineOperand &ImmMO = Def->getOperand(1);
- ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
- MI.getOperand(FIOperandNum)
- .ChangeToRegister(FrameRegister, /*isDef=*/false);
- return;
+ if (ImmMO.isImm()) {
+ ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameRegister, /*isDef=*/false);
+ return;
+ }
}
}
}
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86EvexToVex.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86EvexToVex.cpp
index 540ad98b6d54..540ad98b6d54 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86EvexToVex.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86EvexToVex.cpp
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
index c7ca6fb2a4fc..db6b68659493 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -586,29 +586,55 @@ void X86FrameLowering::emitStackProbeInlineGeneric(
const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
uint64_t ProbeChunk = StackProbeSize * 8;
+ uint64_t MaxAlign =
+ TRI->needsStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
+
// Synthesize a loop or unroll it, depending on the number of iterations.
+ // BuildStackAlignAND ensures that at most MaxAlign % StackProbeSize bytes
+ // are left between the unaligned rsp and the current rsp.
if (Offset > ProbeChunk) {
- emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset);
+ emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
+ MaxAlign % StackProbeSize);
} else {
- emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset);
+ emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
+ MaxAlign % StackProbeSize);
}
}
void X86FrameLowering::emitStackProbeInlineGenericBlock(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
- uint64_t Offset) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
+ uint64_t AlignOffset) const {
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
const X86TargetLowering &TLI = *STI.getTargetLowering();
const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
+
uint64_t CurrentOffset = 0;
- // 0 Thanks to return address being saved on the stack
- uint64_t CurrentProbeOffset = 0;
- // For the first N - 1 pages, just probe. I tried to take advantage of
+ assert(AlignOffset < StackProbeSize);
+
+ // If the offset fits within a single page, there is nothing extra to do;
+ // otherwise allocate and probe the first (partial) page up front.
+ if (StackProbeSize < Offset + AlignOffset) {
+
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(StackProbeSize - AlignOffset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
+ .setMIFlag(MachineInstr::FrameSetup),
+ StackPtr, false, 0)
+ .addImm(0)
+ .setMIFlag(MachineInstr::FrameSetup);
+ NumFrameExtraProbe++;
+ CurrentOffset = StackProbeSize - AlignOffset;
+ }
+
+ // For the next N - 1 pages, just probe. I tried to take advantage of
// natural probes, but it implies much more logic and there were very few
// interesting natural probes to interleave.
while (CurrentOffset + StackProbeSize < Offset) {
@@ -626,9 +652,9 @@ void X86FrameLowering::emitStackProbeInlineGenericBlock(
.setMIFlag(MachineInstr::FrameSetup);
NumFrameExtraProbe++;
CurrentOffset += StackProbeSize;
- CurrentProbeOffset += StackProbeSize;
}
+ // No need to probe the tail; it is smaller than a page.
uint64_t ChunkSize = Offset - CurrentOffset;
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr)
@@ -639,8 +665,8 @@ void X86FrameLowering::emitStackProbeInlineGenericBlock(
void X86FrameLowering::emitStackProbeInlineGenericLoop(
MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
- uint64_t Offset) const {
+ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
+ uint64_t AlignOffset) const {
assert(Offset && "null offset");
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
@@ -648,6 +674,26 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
+ if (AlignOffset) {
+ if (AlignOffset < StackProbeSize) {
+ // Perform a first smaller allocation followed by a probe.
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(AlignOffset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
+ .setMIFlag(MachineInstr::FrameSetup),
+ StackPtr, false, 0)
+ .addImm(0)
+ .setMIFlag(MachineInstr::FrameSetup);
+ NumFrameExtraProbe++;
+ Offset -= AlignOffset;
+ }
+ }
+
// Synthesize a loop
NumFrameLoopProbe++;
const BasicBlock *LLVM_BB = MBB.getBasicBlock();
@@ -666,8 +712,8 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
// save loop bound
{
- const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
- BuildMI(MBB, MBBI, DL, TII.get(Opc), FinalStackProbed)
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
+ BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
.addReg(FinalStackProbed)
.addImm(Offset / StackProbeSize * StackProbeSize)
.setMIFlag(MachineInstr::FrameSetup);
@@ -675,8 +721,8 @@ void X86FrameLowering::emitStackProbeInlineGenericLoop(
// allocate a page
{
- const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
- BuildMI(testMBB, DL, TII.get(Opc), StackPtr)
+ const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
+ BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
.addReg(StackPtr)
.addImm(StackProbeSize)
.setMIFlag(MachineInstr::FrameSetup);
@@ -1052,13 +1098,149 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
uint64_t MaxAlign) const {
uint64_t Val = -MaxAlign;
unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
- MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
- .addReg(Reg)
- .addImm(Val)
- .setMIFlag(MachineInstr::FrameSetup);
- // The EFLAGS implicit def is dead.
- MI->getOperand(3).setIsDead();
+ MachineFunction &MF = *MBB.getParent();
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ const X86TargetLowering &TLI = *STI.getTargetLowering();
+ const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
+ const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
+
+ // We want to make sure that (in the worst case) fewer than StackProbeSize
+ // bytes are left unprobed after the AND. This assumption is used in
+ // emitStackProbeInlineGeneric.
+ if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
+ {
+ NumFrameLoopProbe++;
+ MachineBasicBlock *entryMBB =
+ MF.CreateMachineBasicBlock(MBB.getBasicBlock());
+ MachineBasicBlock *headMBB =
+ MF.CreateMachineBasicBlock(MBB.getBasicBlock());
+ MachineBasicBlock *bodyMBB =
+ MF.CreateMachineBasicBlock(MBB.getBasicBlock());
+ MachineBasicBlock *footMBB =
+ MF.CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ MachineFunction::iterator MBBIter = MBB.getIterator();
+ MF.insert(MBBIter, entryMBB);
+ MF.insert(MBBIter, headMBB);
+ MF.insert(MBBIter, bodyMBB);
+ MF.insert(MBBIter, footMBB);
+ const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
+ Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;
+
+ // Setup entry block
+ {
+
+ entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
+ BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ MachineInstr *MI =
+ BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
+ .addReg(FinalStackProbed)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+
+ BuildMI(entryMBB, DL,
+ TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
+ .addReg(FinalStackProbed)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
+ .addMBB(&MBB)
+ .addImm(X86::COND_E)
+ .setMIFlag(MachineInstr::FrameSetup);
+ entryMBB->addSuccessor(headMBB);
+ entryMBB->addSuccessor(&MBB);
+ }
+
+ // Loop entry block
+
+ {
+ const unsigned SUBOpc =
+ getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
+ BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(StackProbeSize)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ BuildMI(headMBB, DL,
+ TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
+ .addReg(FinalStackProbed)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // jump
+ BuildMI(headMBB, DL, TII.get(X86::JCC_1))
+ .addMBB(footMBB)
+ .addImm(X86::COND_B)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ headMBB->addSuccessor(bodyMBB);
+ headMBB->addSuccessor(footMBB);
+ }
+
+ // setup loop body
+ {
+ addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
+ .setMIFlag(MachineInstr::FrameSetup),
+ StackPtr, false, 0)
+ .addImm(0)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ const unsigned SUBOpc =
+ getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
+ BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(StackProbeSize)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // cmp with stack pointer bound
+ BuildMI(bodyMBB, DL,
+ TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
+ .addReg(FinalStackProbed)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // jump
+ BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
+ .addMBB(bodyMBB)
+ .addImm(X86::COND_B)
+ .setMIFlag(MachineInstr::FrameSetup);
+ bodyMBB->addSuccessor(bodyMBB);
+ bodyMBB->addSuccessor(footMBB);
+ }
+
+ // setup loop footer
+ {
+ BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
+ .addReg(FinalStackProbed)
+ .setMIFlag(MachineInstr::FrameSetup);
+ addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
+ .setMIFlag(MachineInstr::FrameSetup),
+ StackPtr, false, 0)
+ .addImm(0)
+ .setMIFlag(MachineInstr::FrameSetup);
+ footMBB->addSuccessor(&MBB);
+ }
+
+ recomputeLiveIns(*headMBB);
+ recomputeLiveIns(*bodyMBB);
+ recomputeLiveIns(*footMBB);
+ recomputeLiveIns(MBB);
+ }
+ } else {
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
+ .addReg(Reg)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
}
bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
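A minimal C++ model of the invariant the new BuildStackAlignAND path above enforces, shown under assumptions: the names alignAndProbe and Touch are illustrative only, and Touch stands for the MOV-to-stack probe. When the alignment adjustment itself can exceed the probe size, every page between the old and the aligned stack pointer gets touched, so no gap larger than StackProbeSize bytes is left unprobed.

    #include <cstdint>

    // Sketch only: models the emitted entry/body/footer blocks at the C level.
    void alignAndProbe(uint64_t &SP, uint64_t MaxAlign, uint64_t StackProbeSize,
                       void (*Touch)(uint64_t)) {
      const uint64_t Target = SP & ~(MaxAlign - 1); // same effect as AND -MaxAlign
      if (Target == SP)
        return;                                     // entry block: already aligned
      while (SP - Target > StackProbeSize) {        // body: one page per iteration
        SP -= StackProbeSize;
        Touch(SP);                                  // probe store
      }
      SP = Target;                                  // footer: land on the aligned SP
      Touch(SP);
    }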
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
index c0b4be95f88d..bb2e83205e71 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86FrameLowering.h
@@ -213,14 +213,14 @@ private:
void emitStackProbeInlineGenericBlock(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- uint64_t Offset) const;
+ const DebugLoc &DL, uint64_t Offset,
+ uint64_t Align) const;
void emitStackProbeInlineGenericLoop(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- const DebugLoc &DL,
- uint64_t Offset) const;
+ const DebugLoc &DL, uint64_t Offset,
+ uint64_t Align) const;
/// Emit a stub to later inline the target stack probe.
MachineInstr *emitStackProbeInlineStub(MachineFunction &MF,
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1671917157f4..56690c3c555b 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30285,6 +30285,13 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(V);
return;
}
+ case ISD::BITREVERSE:
+ assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
+ assert(Subtarget.hasXOP() && "Expected XOP");
+ // We can use VPPERM by copying to a vector register and back. We'll need
+ // to move the scalar in two i32 pieces.
+ Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
+ return;
}
}
@@ -31876,7 +31883,7 @@ X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
BuildMI(testMBB, DL, TII->get(X86::JCC_1))
.addMBB(tailMBB)
- .addImm(X86::COND_L);
+ .addImm(X86::COND_GE);
testMBB->addSuccessor(blockMBB);
testMBB->addSuccessor(tailMBB);
@@ -31892,9 +31899,9 @@ X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
//
// The property we want to enforce is to never have more than [page alloc] between two probes.
- const unsigned MovMIOpc =
- TFI.Uses64BitFramePtr ? X86::MOV64mi32 : X86::MOV32mi;
- addRegOffset(BuildMI(blockMBB, DL, TII->get(MovMIOpc)), physSPReg, false, 0)
+ const unsigned XORMIOpc =
+ TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
+ addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
.addImm(0);
BuildMI(blockMBB, DL,
@@ -36018,8 +36025,10 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
// Share broadcast with the longest vector and extract low subvector (free).
+ // Ensure the same SDValue from the SDNode use is being used.
for (SDNode *User : Src->uses())
if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
+ Src == User->getOperand(0) &&
User->getValueSizeInBits(0) > VT.getSizeInBits()) {
return extractSubVector(SDValue(User, 0), 0, DAG, DL,
VT.getSizeInBits());
@@ -39588,10 +39597,14 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
// vselect Cond, 000..., X -> andn Cond, X
if (TValIsAllZeros) {
- MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
- SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
- SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
- SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
+ SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
+ SDValue AndN;
+    // The canonical form differs for i1 vectors; x86andnp is not used
+ if (CondVT.getScalarType() == MVT::i1)
+ AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
+ CastRHS);
+ else
+ AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
return DAG.getBitcast(VT, AndN);
}
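A per-lane sketch of the fold in the hunk above, with illustrative names (vselectLane, andnLane) that are not DAG code: for a sign-extended vector condition (each lane 0 or all-ones) the select of zero agrees with ANDNP lane-by-lane, while an i1 mask vector has no X86ISD::ANDNP node, so the same fold has to be expressed as and(not(Cond), X).

    #include <cstdint>

    // One lane with a sign-extended boolean: CondMask is 0 or ~0u.
    uint32_t vselectLane(uint32_t CondMask, uint32_t X) {
      return CondMask ? 0u : X;   // vselect Cond, 000..., X
    }

    uint32_t andnLane(uint32_t CondMask, uint32_t X) {
      return ~CondMask & X;       // ANDNP lane: (~Cond) & X
    }
    // For an i1 mask the fold is instead and(not(Cond), X) on the mask type.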
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
index 4aea7bc253bb..4aea7bc253bb 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedBroadwell.td
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
index 7fc96d1eda89..7fc96d1eda89 100755..100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86SchedSkylakeServer.td
diff --git a/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 54c51b6e7161..f2588938d964 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -357,7 +357,7 @@ DeadArgumentEliminationPass::Liveness
DeadArgumentEliminationPass::MarkIfNotLive(RetOrArg Use,
UseVector &MaybeLiveUses) {
// We're live if our use or its Function is already marked as live.
- if (LiveFunctions.count(Use.F) || LiveValues.count(Use))
+ if (IsLive(Use))
return Live;
// We're maybe live otherwise, but remember that we must become live if
@@ -657,10 +657,18 @@ void DeadArgumentEliminationPass::MarkValue(const RetOrArg &RA, Liveness L,
MarkLive(RA);
break;
case MaybeLive:
- // Note any uses of this value, so this return value can be
- // marked live whenever one of the uses becomes live.
- for (const auto &MaybeLiveUse : MaybeLiveUses)
- Uses.insert(std::make_pair(MaybeLiveUse, RA));
+ assert(!IsLive(RA) && "Use is already live!");
+ for (const auto &MaybeLiveUse : MaybeLiveUses) {
+ if (IsLive(MaybeLiveUse)) {
+ // A use is live, so this value is live.
+ MarkLive(RA);
+ break;
+ } else {
+ // Note any uses of this value, so this value can be
+ // marked live whenever one of the uses becomes live.
+ Uses.insert(std::make_pair(MaybeLiveUse, RA));
+ }
+ }
break;
}
}
@@ -686,17 +694,20 @@ void DeadArgumentEliminationPass::MarkLive(const Function &F) {
/// mark any values that are used by this value (according to Uses) live as
/// well.
void DeadArgumentEliminationPass::MarkLive(const RetOrArg &RA) {
- if (LiveFunctions.count(RA.F))
- return; // Function was already marked Live.
+ if (IsLive(RA))
+ return; // Already marked Live.
- if (!LiveValues.insert(RA).second)
- return; // We were already marked Live.
+ LiveValues.insert(RA);
LLVM_DEBUG(dbgs() << "DeadArgumentEliminationPass - Marking "
<< RA.getDescription() << " live\n");
PropagateLiveness(RA);
}
+bool DeadArgumentEliminationPass::IsLive(const RetOrArg &RA) {
+ return LiveFunctions.count(RA.F) || LiveValues.count(RA);
+}
+
/// PropagateLiveness - Given that RA is a live value, propagate its liveness
/// to any other values it uses (according to Uses).
void DeadArgumentEliminationPass::PropagateLiveness(const RetOrArg &RA) {
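A self-contained model of the bookkeeping the DeadArgumentElimination change above tightens, under assumptions: values are plain strings and LivenessModel is an illustrative type, not the pass's RetOrArg machinery. The point mirrored here is that MarkValue(MaybeLive) now consults IsLive for each recorded use, so a value whose use is already live becomes live immediately rather than waiting on an edge that would never be walked again.

    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    struct LivenessModel {
      std::set<std::string> LiveValues;
      std::multimap<std::string, std::string> Uses; // use -> value it keeps alive

      bool isLive(const std::string &V) const { return LiveValues.count(V) != 0; }

      void markLive(const std::string &V) {
        if (!LiveValues.insert(V).second)
          return;                                   // already live
        auto Range = Uses.equal_range(V);           // propagate along recorded uses
        for (auto It = Range.first; It != Range.second; ++It)
          markLive(It->second);
      }

      // Mirrors the patched MarkValue(MaybeLive): a use that is already live
      // makes the value live now; otherwise the edge is remembered for later.
      void markMaybeLive(const std::string &V,
                         const std::vector<std::string> &MaybeLiveUses) {
        for (const std::string &U : MaybeLiveUses) {
          if (isLive(U)) {
            markLive(V);
            return;
          }
          Uses.insert({U, V});
        }
      }
    };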
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index fa695c39cd1e..1e43014e7d32 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -782,25 +782,24 @@ static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
// Match unsigned saturated add of 2 variables with an unnecessary 'not'.
// There are 8 commuted variants.
- // Canonicalize -1 (saturated result) to true value of the select. Just
- // swapping the compare operands is legal, because the selected value is the
- // same in case of equality, so we can interchange u< and u<=.
+ // Canonicalize -1 (saturated result) to true value of the select.
if (match(FVal, m_AllOnes())) {
std::swap(TVal, FVal);
- std::swap(Cmp0, Cmp1);
+ Pred = CmpInst::getInversePredicate(Pred);
}
if (!match(TVal, m_AllOnes()))
return nullptr;
- // Canonicalize predicate to 'ULT'.
- if (Pred == ICmpInst::ICMP_UGT) {
- Pred = ICmpInst::ICMP_ULT;
+ // Canonicalize predicate to less-than or less-or-equal-than.
+ if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
std::swap(Cmp0, Cmp1);
+ Pred = CmpInst::getSwappedPredicate(Pred);
}
- if (Pred != ICmpInst::ICMP_ULT)
+ if (Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_ULE)
return nullptr;
// Match unsigned saturated add of 2 variables with an unnecessary 'not'.
+ // Strictness of the comparison is irrelevant.
Value *Y;
if (match(Cmp0, m_Not(m_Value(X))) &&
match(FVal, m_c_Add(m_Specific(X), m_Value(Y))) && Y == Cmp1) {
@@ -809,6 +808,7 @@ static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, Y);
}
// The 'not' op may be included in the sum but not the compare.
+ // Strictness of the comparison is irrelevant.
X = Cmp0;
Y = Cmp1;
if (match(FVal, m_c_Add(m_Not(m_Specific(X)), m_Specific(Y)))) {
@@ -819,7 +819,9 @@ static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
Intrinsic::uadd_sat, BO->getOperand(0), BO->getOperand(1));
}
// The overflow may be detected via the add wrapping round.
- if (match(Cmp0, m_c_Add(m_Specific(Cmp1), m_Value(Y))) &&
+ // This is only valid for strict comparison!
+ if (Pred == ICmpInst::ICMP_ULT &&
+ match(Cmp0, m_c_Add(m_Specific(Cmp1), m_Value(Y))) &&
match(FVal, m_c_Add(m_Specific(Cmp1), m_Specific(Y)))) {
// ((X + Y) u< X) ? -1 : (X + Y) --> uadd.sat(X, Y)
// ((X + Y) u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
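A small worked example of the "only valid for strict comparison" note in the hunk above, using hypothetical helper names (selectWithULE, uaddSat): when overflow is detected via the wrapping add, a non-strict u<= compare misfires for Y == 0, where the sum equals X without any overflow, so folding that form to uadd.sat would change the result.

    #include <cstdint>
    #include <cstdio>

    // Non-strict form of the pattern: (X + Y) u<= X ? -1 : (X + Y).
    static uint8_t selectWithULE(uint8_t X, uint8_t Y) {
      uint8_t Sum = static_cast<uint8_t>(X + Y);  // may wrap
      return Sum <= X ? 0xFF : Sum;
    }

    // Reference semantics of llvm.uadd.sat.i8: the strict compare detects the wrap.
    static uint8_t uaddSat(uint8_t X, uint8_t Y) {
      uint8_t Sum = static_cast<uint8_t>(X + Y);
      return Sum < X ? 0xFF : Sum;
    }

    int main() {
      // Y == 0: no overflow occurs, yet the non-strict compare still selects -1.
      std::printf("%u vs %u\n", unsigned(selectWithULE(200, 0)),
                  unsigned(uaddSat(200, 0)));     // prints "255 vs 200"
      return 0;
    }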
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index ee09a4d9db7e..1557fad4d372 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -792,7 +792,7 @@ private:
StringRef InternalSuffix);
Instruction *CreateAsanModuleDtor(Module &M);
- bool canInstrumentAliasedGlobal(const GlobalAlias &GA) const;
+ const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
bool shouldInstrumentGlobal(GlobalVariable *G) const;
bool ShouldUseMachOGlobalsSection() const;
StringRef getGlobalMetadataSection() const;
@@ -1784,20 +1784,22 @@ void ModuleAddressSanitizer::createInitializerPoisonCalls(
}
}
-bool ModuleAddressSanitizer::canInstrumentAliasedGlobal(
- const GlobalAlias &GA) const {
+const GlobalVariable *
+ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
// In case this function should be expanded to include rules that do not just
// apply when CompileKernel is true, either guard all existing rules with an
// 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
// should also apply to user space.
assert(CompileKernel && "Only expecting to be called when compiling kernel");
+ const Constant *C = GA.getAliasee();
+
// When compiling the kernel, globals that are aliased by symbols prefixed
// by "__" are special and cannot be padded with a redzone.
if (GA.getName().startswith("__"))
- return false;
+ return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
- return true;
+ return nullptr;
}
bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
@@ -2256,14 +2258,12 @@ bool ModuleAddressSanitizer::InstrumentGlobals(IRBuilder<> &IRB, Module &M,
*CtorComdat = false;
// Build set of globals that are aliased by some GA, where
- // canInstrumentAliasedGlobal(GA) returns false.
+ // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
if (CompileKernel) {
for (auto &GA : M.aliases()) {
- if (const auto *GV = dyn_cast<GlobalVariable>(GA.getAliasee())) {
- if (!canInstrumentAliasedGlobal(GA))
- AliasedGlobalExclusions.insert(GV);
- }
+ if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
+ AliasedGlobalExclusions.insert(GV);
}
}