author     Dimitry Andric <dim@FreeBSD.org>   2023-09-11 13:44:52 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2023-09-11 13:44:52 +0000
commit     8092e001bcd76c0b9fec2311f3a515aa60d2ed07
tree       11ad04f2dd7ba4fa65e4f3e402399dfc7deb68ea
parent     c938c0a643200ec844981864ac587bc6c1f576aa

Vendor import of llvm-project branch release/17.x llvmorg-17.0.0-rc4-10-g0176e8729ea4
(vendor/llvm-project/llvmorg-17.0.0-rc4-10-g0176e8729ea4)
-rw-r--r--  clang/include/clang/AST/ExprConcepts.h | 14
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 2
-rw-r--r--  clang/lib/Driver/Driver.cpp | 8
-rw-r--r--  clang/lib/Driver/ToolChains/AIX.cpp | 6
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/X86.cpp | 8
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/X86.h | 2
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 2
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 5
-rw-r--r--  clang/lib/Sema/SemaExprCXX.cpp | 25
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiate.cpp | 17
-rw-r--r--  clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc | 54
-rw-r--r--  compiler-rt/lib/builtins/aarch64/lse.S | 40
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc | 16
-rw-r--r--  libcxx/include/__config | 36
-rw-r--r--  llvm/include/llvm/Analysis/LazyValueInfo.h | 3
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp | 9
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 50
-rw-r--r--  llvm/lib/Transforms/Scalar/JumpThreading.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 38
21 files changed, 297 insertions(+), 54 deletions(-)
diff --git a/clang/include/clang/AST/ExprConcepts.h b/clang/include/clang/AST/ExprConcepts.h
index d900e980852b..13d4568119eb 100644
--- a/clang/include/clang/AST/ExprConcepts.h
+++ b/clang/include/clang/AST/ExprConcepts.h
@@ -14,20 +14,21 @@
#ifndef LLVM_CLANG_AST_EXPRCONCEPTS_H
#define LLVM_CLANG_AST_EXPRCONCEPTS_H
-#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
-#include <utility>
#include <string>
+#include <utility>
namespace clang {
class ASTStmtReader;
@@ -467,6 +468,13 @@ public:
}
};
+using EntityPrinter = llvm::function_ref<void(llvm::raw_ostream &)>;
+
+/// \brief create a Requirement::SubstitutionDiagnostic with only a
+/// SubstitutedEntity and DiagLoc using Sema's allocator.
+Requirement::SubstitutionDiagnostic *
+createSubstDiagAt(Sema &S, SourceLocation Location, EntityPrinter Printer);
+
} // namespace concepts
/// C++2a [expr.prim.req]:
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index a3506df7d4e5..f09d1129b128 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2386,7 +2386,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// functions. If the current target's C++ ABI requires this and this is a
// member function, set its alignment accordingly.
if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
- if (F->getPointerAlignment(getDataLayout()) < 2 && isa<CXXMethodDecl>(D))
+ if (isa<CXXMethodDecl>(D) && F->getPointerAlignment(getDataLayout()) < 2)
F->setAlignment(std::max(llvm::Align(2), F->getAlign().valueOrOne()));
}
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index f6ea4d0b4366..bdbdad9362e1 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -4936,6 +4936,12 @@ void Driver::BuildJobs(Compilation &C) const {
(void)C.getArgs().hasArg(options::OPT_driver_mode);
(void)C.getArgs().hasArg(options::OPT_rsp_quoting);
+ bool HasAssembleJob = llvm::any_of(C.getJobs(), [](auto &J) {
+ // Match ClangAs and other derived assemblers of Tool. ClangAs uses a
+ // longer ShortName "clang integrated assembler" while other assemblers just
+ // use "assembler".
+ return strstr(J.getCreator().getShortName(), "assembler");
+ });
for (Arg *A : C.getArgs()) {
// FIXME: It would be nice to be able to send the argument to the
// DiagnosticsEngine, so that extra values, position, and so on could be
@@ -4965,7 +4971,7 @@ void Driver::BuildJobs(Compilation &C) const {
// already been warned about.
if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN)) {
if (A->getOption().hasFlag(options::TargetSpecific) &&
- !A->isIgnoredTargetSpecific()) {
+ !A->isIgnoredTargetSpecific() && !HasAssembleJob) {
Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << getTargetTriple();
} else {
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 97217eba9ca0..bfc86d9f3471 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -30,6 +30,7 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
const bool IsArch32Bit = getToolChain().getTriple().isArch32Bit();
@@ -38,6 +39,11 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (!IsArch32Bit && !IsArch64Bit)
llvm_unreachable("Unsupported bit width value.");
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_G)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << D.getTargetTriple();
+ }
+
// Specify the mode in which the as(1) command operates.
if (IsArch32Bit) {
CmdArgs.push_back("-a32");
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.cpp b/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 4383b8004143..cf2bc63d74ad 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -118,13 +118,7 @@ std::string x86::getX86TargetCPU(const Driver &D, const ArgList &Args,
void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<StringRef> &Features, bool ForAS) {
- if (ForAS) {
- // Some target-specific options are only handled in AddX86TargetArgs, which
- // is not called by ClangAs::ConstructJob. Claim them here.
- Args.claimAllArgs(options::OPT_mfpmath_EQ);
- }
-
+ std::vector<StringRef> &Features) {
// Claim and report unsupported -mabi=. Note: we don't support "sysv_abi" or
// "ms_abi" as default function attributes.
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mabi_EQ)) {
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.h b/clang/lib/Driver/ToolChains/Arch/X86.h
index 762a1fa6f4d5..e07387f3ece3 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.h
+++ b/clang/lib/Driver/ToolChains/Arch/X86.h
@@ -26,7 +26,7 @@ std::string getX86TargetCPU(const Driver &D, const llvm::opt::ArgList &Args,
void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
- std::vector<llvm::StringRef> &Features, bool ForAS);
+ std::vector<llvm::StringRef> &Features);
} // end namespace x86
} // end namespace target
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 8766d34eec53..0d6907b8e5c7 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -528,7 +528,7 @@ void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- x86::getX86TargetFeatures(D, Triple, Args, Features, ForAS);
+ x86::getX86TargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::hexagon:
hexagon::getHexagonTargetFeatures(D, Triple, Args, Features);
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index c6f958a6077b..0bd4b01ff79d 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -15,6 +15,7 @@
#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/Sarif.h"
+#include "clang/Basic/Stack.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -1150,6 +1151,10 @@ void ASTFrontendAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
if (!CI.hasPreprocessor())
return;
+ // This is a fallback: If the client forgets to invoke this, we mark the
+ // current stack as the bottom. Though not optimal, this could help prevent
+ // stack overflow during deep recursion.
+ clang::noteBottomOfStack();
// FIXME: Move the truncation aspect of this into Sema, we delayed this till
// here so the source manager would be initialized.
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index 423d5372a6f6..1cff4a75790e 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
@@ -9072,16 +9073,24 @@ Sema::BuildExprRequirement(
MultiLevelTemplateArgumentList MLTAL(Param, TAL.asArray(),
/*Final=*/false);
MLTAL.addOuterRetainedLevels(TPL->getDepth());
- Expr *IDC = Param->getTypeConstraint()->getImmediatelyDeclaredConstraint();
+ const TypeConstraint *TC = Param->getTypeConstraint();
+ assert(TC && "Type Constraint cannot be null here");
+ auto *IDC = TC->getImmediatelyDeclaredConstraint();
+ assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
ExprResult Constraint = SubstExpr(IDC, MLTAL);
if (Constraint.isInvalid()) {
- Status = concepts::ExprRequirement::SS_ExprSubstitutionFailure;
- } else {
- SubstitutedConstraintExpr =
- cast<ConceptSpecializationExpr>(Constraint.get());
- if (!SubstitutedConstraintExpr->isSatisfied())
- Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
- }
+ return new (Context) concepts::ExprRequirement(
+ concepts::createSubstDiagAt(*this, IDC->getExprLoc(),
+ [&](llvm::raw_ostream &OS) {
+ IDC->printPretty(OS, /*Helper=*/nullptr,
+ getPrintingPolicy());
+ }),
+ IsSimple, NoexceptLoc, ReturnTypeRequirement);
+ }
+ SubstitutedConstraintExpr =
+ cast<ConceptSpecializationExpr>(Constraint.get());
+ if (!SubstitutedConstraintExpr->isSatisfied())
+ Status = concepts::ExprRequirement::SS_ConstraintsNotSatisfied;
}
return new (Context) concepts::ExprRequirement(E, IsSimple, NoexceptLoc,
ReturnTypeRequirement, Status,
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 8702e2ca3a1b..394006a57747 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -2276,9 +2276,9 @@ QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
getPackIndex(Pack), Arg, TL.getNameLoc());
}
-template<typename EntityPrinter>
static concepts::Requirement::SubstitutionDiagnostic *
-createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
+createSubstDiag(Sema &S, TemplateDeductionInfo &Info,
+ concepts::EntityPrinter Printer) {
SmallString<128> Message;
SourceLocation ErrorLoc;
if (Info.hasSFINAEDiagnostic()) {
@@ -2302,6 +2302,19 @@ createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
StringRef(MessageBuf, Message.size())};
}
+concepts::Requirement::SubstitutionDiagnostic *
+concepts::createSubstDiagAt(Sema &S, SourceLocation Location,
+ EntityPrinter Printer) {
+ SmallString<128> Entity;
+ llvm::raw_svector_ostream OS(Entity);
+ Printer(OS);
+ char *EntityBuf = new (S.Context) char[Entity.size()];
+ llvm::copy(Entity, EntityBuf);
+ return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
+ /*SubstitutedEntity=*/StringRef(EntityBuf, Entity.size()),
+ /*DiagLoc=*/Location, /*DiagMessage=*/StringRef()};
+}
+
ExprResult TemplateInstantiator::TransformRequiresTypeParams(
SourceLocation KWLoc, SourceLocation RBraceLoc, const RequiresExpr *RE,
RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> Params,
diff --git a/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc b/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
index a08ec11e77a4..b46bd2e4d7a4 100644
--- a/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
+++ b/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
@@ -3773,6 +3773,33 @@ SYMBOL(viewable_range, std::ranges::, <ranges>)
SYMBOL(wistream_view, std::ranges::, <ranges>)
SYMBOL(zip_transform_view, std::ranges::, <ranges>)
SYMBOL(zip_view, std::ranges::, <ranges>)
+SYMBOL(all, std::ranges::views::, <ranges>)
+SYMBOL(all_t, std::ranges::views::, <ranges>)
+SYMBOL(as_const, std::ranges::views::, <ranges>)
+SYMBOL(as_rvalue, std::ranges::views::, <ranges>)
+SYMBOL(common, std::ranges::views::, <ranges>)
+SYMBOL(counted, std::ranges::views::, <ranges>)
+SYMBOL(drop, std::ranges::views::, <ranges>)
+SYMBOL(drop_while, std::ranges::views::, <ranges>)
+SYMBOL(elements, std::ranges::views::, <ranges>)
+SYMBOL(empty, std::ranges::views::, <ranges>)
+SYMBOL(filter, std::ranges::views::, <ranges>)
+SYMBOL(iota, std::ranges::views::, <ranges>)
+SYMBOL(istream, std::ranges::views::, <ranges>)
+SYMBOL(istream, std::ranges::views::, <iosfwd>)
+SYMBOL(join, std::ranges::views::, <ranges>)
+SYMBOL(join_with, std::ranges::views::, <ranges>)
+SYMBOL(keys, std::ranges::views::, <ranges>)
+SYMBOL(lazy_split, std::ranges::views::, <ranges>)
+SYMBOL(reverse, std::ranges::views::, <ranges>)
+SYMBOL(single, std::ranges::views::, <ranges>)
+SYMBOL(split, std::ranges::views::, <ranges>)
+SYMBOL(take, std::ranges::views::, <ranges>)
+SYMBOL(take_while, std::ranges::views::, <ranges>)
+SYMBOL(transform, std::ranges::views::, <ranges>)
+SYMBOL(values, std::ranges::views::, <ranges>)
+SYMBOL(zip, std::ranges::views::, <ranges>)
+SYMBOL(zip_transform, std::ranges::views::, <ranges>)
SYMBOL(ECMAScript, std::regex_constants::, <regex>)
SYMBOL(awk, std::regex_constants::, <regex>)
SYMBOL(basic, std::regex_constants::, <regex>)
@@ -3817,3 +3844,30 @@ SYMBOL(get_id, std::this_thread::, <thread>)
SYMBOL(sleep_for, std::this_thread::, <thread>)
SYMBOL(sleep_until, std::this_thread::, <thread>)
SYMBOL(yield, std::this_thread::, <thread>)
+SYMBOL(all, std::views::, <ranges>)
+SYMBOL(all_t, std::views::, <ranges>)
+SYMBOL(as_const, std::views::, <ranges>)
+SYMBOL(as_rvalue, std::views::, <ranges>)
+SYMBOL(common, std::views::, <ranges>)
+SYMBOL(counted, std::views::, <ranges>)
+SYMBOL(drop, std::views::, <ranges>)
+SYMBOL(drop_while, std::views::, <ranges>)
+SYMBOL(elements, std::views::, <ranges>)
+SYMBOL(empty, std::views::, <ranges>)
+SYMBOL(filter, std::views::, <ranges>)
+SYMBOL(iota, std::views::, <ranges>)
+SYMBOL(istream, std::views::, <ranges>)
+SYMBOL(istream, std::views::, <iosfwd>)
+SYMBOL(join, std::views::, <ranges>)
+SYMBOL(join_with, std::views::, <ranges>)
+SYMBOL(keys, std::views::, <ranges>)
+SYMBOL(lazy_split, std::views::, <ranges>)
+SYMBOL(reverse, std::views::, <ranges>)
+SYMBOL(single, std::views::, <ranges>)
+SYMBOL(split, std::views::, <ranges>)
+SYMBOL(take, std::views::, <ranges>)
+SYMBOL(take_while, std::views::, <ranges>)
+SYMBOL(transform, std::views::, <ranges>)
+SYMBOL(values, std::views::, <ranges>)
+SYMBOL(zip, std::views::, <ranges>)
+SYMBOL(zip_transform, std::views::, <ranges>)
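
(Note: the StdSymbolMap.inc entries above teach clang's include-mapping tooling, e.g. clangd header insertion, that the std::views / std::ranges::views range adaptors live in <ranges>. The snippet below is only an illustrative sketch of the kind of user code these mappings affect; it is not part of the patch.)

  #include <ranges>    // the header the SYMBOL() entries above map std::views::* to
  #include <vector>
  #include <iostream>

  int main() {
    std::vector<int> v{1, 2, 3, 4, 5, 6};
    // filter and transform are among the newly mapped adaptor names.
    for (int x : v | std::views::filter([](int n) { return n % 2 == 0; })
                   | std::views::transform([](int n) { return n * n; }))
      std::cout << x << ' ';
    std::cout << '\n';   // prints: 4 16 36
  }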
diff --git a/compiler-rt/lib/builtins/aarch64/lse.S b/compiler-rt/lib/builtins/aarch64/lse.S
index 5dc0d5320b5a..1fe18f4a4681 100644
--- a/compiler-rt/lib/builtins/aarch64/lse.S
+++ b/compiler-rt/lib/builtins/aarch64/lse.S
@@ -7,7 +7,7 @@
// Out-of-line LSE atomics helpers. Ported from libgcc library.
// N = {1, 2, 4, 8}
// M = {1, 2, 4, 8, 16}
-// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
+// ORDER = {'relax', 'acq', 'rel', 'acq_rel', 'sync'}
// Routines implemented:
//
// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
@@ -35,8 +35,8 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
// Generate mnemonics for
-// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
-// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
+// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4,5
+// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4,5
#if SIZE == 1
#define S b
@@ -64,24 +64,44 @@ HIDDEN(___aarch64_have_lse_atomics)
#define L
#define M 0x000000
#define N 0x000000
+#define BARRIER
#elif MODEL == 2
#define SUFF _acq
#define A a
#define L
#define M 0x400000
#define N 0x800000
+#define BARRIER
#elif MODEL == 3
#define SUFF _rel
#define A
#define L l
#define M 0x008000
#define N 0x400000
+#define BARRIER
#elif MODEL == 4
#define SUFF _acq_rel
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
+#define BARRIER
+#elif MODEL == 5
+#define SUFF _sync
+#ifdef L_swp
+// swp has _acq semantics.
+#define A a
+#define L
+#define M 0x400000
+#define N 0x800000
+#else
+// All other _sync functions have _seq semantics.
+#define A a
+#define L l
+#define M 0x408000
+#define N 0xc00000
+#endif
+#define BARRIER dmb ish
#else
#error
#endif // MODEL
@@ -96,7 +116,12 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXR GLUE3(ld, xr, S)
+#else
#define LDXR GLUE4(ld, A, xr, S)
+#endif
#define STXR GLUE4(st, L, xr, S)
// Define temporary registers.
@@ -136,9 +161,15 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1:
+ BARRIER
ret
#else
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXP GLUE2(ld, xp)
+#else
#define LDXP GLUE3(ld, A, xp)
+#endif
#define STXP GLUE3(st, L, xp)
#ifdef HAS_ASM_LSE
#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
@@ -159,6 +190,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1:
+ BARRIER
ret
#endif
END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
@@ -180,6 +212,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
#endif // L_swp
@@ -224,6 +257,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
#endif // L_ldadd L_ldclr L_ldeor L_ldset
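
(Note: the new MODEL == 5 "_sync" helpers above end in a full "dmb ish" barrier; they back the legacy __sync_* builtins when code is built with -moutline-atomics, which need a full barrier rather than plain acquire/release ordering. The sketch below is an assumption-laden illustration — the exact helper chosen depends on compiler version and operand size — and is not part of the patch.)

  // Built with e.g.: clang --target=aarch64-linux-gnu -moutline-atomics -O2 -c sync_add.cpp
  #include <cstdint>

  uint32_t add_full_barrier(uint32_t *p, uint32_t v) {
    // Under outline atomics this legacy full-barrier builtin is expected to
    // lower to a call such as __aarch64_ldadd4_sync, one of the MODEL == 5
    // routines defined above.
    return __sync_fetch_and_add(p, v);
  }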
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 220abb89c3be..24485900644b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- // For %ms/%mc, write the allocated output buffer as well.
+ // For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
- char *buf = *(char **)argp;
- if (buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ if (char *buf = *(char **)argp) {
+ if (dir.convSpecifier == 'c')
+ size = 1;
+ else if (dir.convSpecifier == 'C')
+ size = sizeof(wchar_t);
+ else if (dir.convSpecifier == 'S')
+ size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
+ else // 's' or '['
+ size = internal_strlen(buf) + 1;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ }
}
}
}
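
(Note: the interceptor change above covers the POSIX "m" allocation modifier for scanf: with %ms, %m[, %mc, %mC and %mS the C library allocates the output buffer itself and stores a pointer to it, so the sanitizer must also mark that allocation as written, with a size that depends on the conversion specifier. The snippet below is an ordinary POSIX usage sketch, not code from this patch.)

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  int main() {
    char *word = nullptr;
    // POSIX "%ms": scanf allocates a buffer for the token and stores its
    // address in 'word'; the interceptor now records a write of
    // strlen(word) + 1 bytes into that allocation.
    if (std::scanf("%ms", &word) == 1) {
      std::printf("read: %s\n", word);
      std::free(word);
    }
    return 0;
  }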
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 9759d3b9e8e0..43f8a20031ff 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -208,19 +208,16 @@
// HARDENING {
-// TODO(hardening): remove this in LLVM 18.
-// This is for backward compatibility -- make enabling `_LIBCPP_ENABLE_ASSERTIONS` (which predates hardening modes)
-// equivalent to setting the hardened mode.
-# ifdef _LIBCPP_ENABLE_ASSERTIONS
-# warning "_LIBCPP_ENABLE_ASSERTIONS is deprecated, please use _LIBCPP_ENABLE_HARDENED_MODE instead."
-# if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1
-# error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1"
-# endif
-# if _LIBCPP_ENABLE_ASSERTIONS
-# define _LIBCPP_ENABLE_HARDENED_MODE 1
-# endif
+# ifndef _LIBCPP_ENABLE_ASSERTIONS
+# define _LIBCPP_ENABLE_ASSERTIONS _LIBCPP_ENABLE_ASSERTIONS_DEFAULT
+# endif
+# if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1
+# error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1"
# endif
+// NOTE: These modes are experimental and are not stable yet in LLVM 17. Please refrain from using them and use the
+// documented libc++ "safe" mode instead.
+//
// Enables the hardened mode which consists of all checks intended to be used in production. Hardened mode prioritizes
// security-critical checks that can be done with relatively little overhead in constant time. Mutually exclusive with
// `_LIBCPP_ENABLE_DEBUG_MODE`.
@@ -275,6 +272,11 @@
# error "Only one of _LIBCPP_ENABLE_HARDENED_MODE and _LIBCPP_ENABLE_DEBUG_MODE can be enabled."
# endif
+# if _LIBCPP_ENABLE_ASSERTIONS && (_LIBCPP_ENABLE_HARDENED_MODE || _LIBCPP_ENABLE_DEBUG_MODE)
+# error \
+ "_LIBCPP_ENABLE_ASSERTIONS is mutually exclusive with _LIBCPP_ENABLE_HARDENED_MODE and _LIBCPP_ENABLE_DEBUG_MODE."
+# endif
+
// Hardened mode checks.
// clang-format off
@@ -303,6 +305,18 @@
# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
+// Safe mode checks.
+
+# elif _LIBCPP_ENABLE_ASSERTIONS
+
+// All checks enabled.
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message)
+# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message)
+
// Disable all checks if hardening is not enabled.
# else
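
(Note: the __config change above restores the pre-hardening _LIBCPP_ENABLE_ASSERTIONS toggle for LLVM 17: defining it to 1 enables the listed _LIBCPP_ASSERT_* categories ("safe mode"), and combining it with the still-experimental hardened or debug modes is now a hard error. A hedged usage sketch follows — which operations are actually checked is determined by libc++, not by this snippet — and it is not part of the patch.)

  // Build with, e.g.: clang++ -std=c++20 -stdlib=libc++ -D_LIBCPP_ENABLE_ASSERTIONS=1 demo.cpp
  #include <vector>

  int main() {
    std::vector<int> v{1, 2, 3};
    // With assertions enabled, this out-of-bounds operator[] access is expected
    // to trip a _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS check and terminate,
    // instead of being silent undefined behavior.
    return v[3];
  }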
diff --git a/llvm/include/llvm/Analysis/LazyValueInfo.h b/llvm/include/llvm/Analysis/LazyValueInfo.h
index b109b7f7e65a..7b2bfdac75a8 100644
--- a/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -115,6 +115,9 @@ public:
/// PredBB to OldSucc to be from PredBB to NewSucc instead.
void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
+ /// Remove information related to this value from the cache.
+ void forgetValue(Value *V);
+
/// Inform the analysis cache that we have erased a block.
void eraseBlock(BasicBlock *BB);
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 33651783cb17..2ba6036056d9 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -465,6 +465,10 @@ public:
F.print(OS, &Writer);
}
+ /// This is part of the update interface to remove information related to this
+ /// value from the cache.
+ void forgetValue(Value *V) { TheCache.eraseValue(V); }
+
/// This is part of the update interface to inform the cache
/// that a block has been deleted.
void eraseBlock(BasicBlock *BB) {
@@ -1969,6 +1973,11 @@ void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
}
}
+void LazyValueInfo::forgetValue(Value *V) {
+ if (PImpl)
+ getImpl(PImpl, AC, nullptr).forgetValue(V);
+}
+
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
if (PImpl) {
getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 111d4d30aab9..39ab48b4a48e 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6833,7 +6833,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
if (llvm::isKnownNonZero(V, DL))
MinVal = Align;
ConservativeResult = ConservativeResult.intersectWith(
- {MinVal, MaxVal + 1}, RangeType);
+ ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
}
}
diff --git a/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp b/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
index 952f454f8f6a..7979ac9a5fb7 100644
--- a/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
+++ b/llvm/lib/CodeGen/ComplexDeinterleavingPass.cpp
@@ -1424,7 +1424,17 @@ bool ComplexDeinterleavingGraph::identifyNodes(Instruction *RootI) {
// CompositeNode we should choose only one either Real or Imag instruction to
// use as an anchor for generating complex instruction.
auto It = RootToNode.find(RootI);
- if (It != RootToNode.end() && It->second->Real == RootI) {
+ if (It != RootToNode.end()) {
+ auto RootNode = It->second;
+ assert(RootNode->Operation ==
+ ComplexDeinterleavingOperation::ReductionOperation);
+ // Find out which part, Real or Imag, comes later, and only if we come to
+ // the latest part, add it to OrderedRoots.
+ auto *R = cast<Instruction>(RootNode->Real);
+ auto *I = cast<Instruction>(RootNode->Imag);
+ auto *ReplacementAnchor = R->comesBefore(I) ? I : R;
+ if (ReplacementAnchor != RootI)
+ return false;
OrderedRoots.push_back(RootI);
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0605dfa63793..c7a6dd7deb45 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13840,7 +13840,17 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_neon_ld4:
case Intrinsic::aarch64_neon_ld1x2:
case Intrinsic::aarch64_neon_ld1x3:
- case Intrinsic::aarch64_neon_ld1x4:
+ case Intrinsic::aarch64_neon_ld1x4: {
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
+ Info.offset = 0;
+ Info.align.reset();
+ // volatile loads with NEON intrinsics not supported
+ Info.flags = MachineMemOperand::MOLoad;
+ return true;
+ }
case Intrinsic::aarch64_neon_ld2lane:
case Intrinsic::aarch64_neon_ld3lane:
case Intrinsic::aarch64_neon_ld4lane:
@@ -13848,9 +13858,13 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_neon_ld3r:
case Intrinsic::aarch64_neon_ld4r: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
- // Conservatively set memVT to the entire set of vectors loaded.
- uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ // ldx return struct with the same vec type
+ Type *RetTy = I.getType();
+ auto *StructTy = cast<StructType>(RetTy);
+ unsigned NumElts = StructTy->getNumElements();
+ Type *VecTy = StructTy->getElementType(0);
+ MVT EleVT = MVT::getVT(VecTy).getVectorElementType();
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), EleVT, NumElts);
Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
Info.offset = 0;
Info.align.reset();
@@ -13863,20 +13877,40 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::aarch64_neon_st4:
case Intrinsic::aarch64_neon_st1x2:
case Intrinsic::aarch64_neon_st1x3:
- case Intrinsic::aarch64_neon_st1x4:
+ case Intrinsic::aarch64_neon_st1x4: {
+ Info.opc = ISD::INTRINSIC_VOID;
+ unsigned NumElts = 0;
+ for (const Value *Arg : I.args()) {
+ Type *ArgTy = Arg->getType();
+ if (!ArgTy->isVectorTy())
+ break;
+ NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
+ }
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+ Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
+ Info.offset = 0;
+ Info.align.reset();
+ // volatile stores with NEON intrinsics not supported
+ Info.flags = MachineMemOperand::MOStore;
+ return true;
+ }
case Intrinsic::aarch64_neon_st2lane:
case Intrinsic::aarch64_neon_st3lane:
case Intrinsic::aarch64_neon_st4lane: {
Info.opc = ISD::INTRINSIC_VOID;
- // Conservatively set memVT to the entire set of vectors stored.
unsigned NumElts = 0;
+ // all the vector type is same
+ Type *VecTy = I.getArgOperand(0)->getType();
+ MVT EleVT = MVT::getVT(VecTy).getVectorElementType();
+
for (const Value *Arg : I.args()) {
Type *ArgTy = Arg->getType();
if (!ArgTy->isVectorTy())
break;
- NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
+ NumElts += 1;
}
- Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+
+ Info.memVT = EVT::getVectorVT(I.getType()->getContext(), EleVT, NumElts);
Info.ptrVal = I.getArgOperand(I.arg_size() - 1);
Info.offset = 0;
Info.align.reset();
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 24390f1b54f6..5b8f1b00dc03 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1269,6 +1269,7 @@ bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
if (IsLoadCSE) {
LoadInst *NLoadI = cast<LoadInst>(AvailableVal);
combineMetadataForCSE(NLoadI, LoadI, false);
+ LVI->forgetValue(NLoadI);
};
// If the returned value is the load itself, replace with poison. This can
@@ -1461,6 +1462,7 @@ bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
for (LoadInst *PredLoadI : CSELoads) {
combineMetadataForCSE(PredLoadI, LoadI, true);
+ LVI->forgetValue(PredLoadI);
}
LoadI->replaceAllUsesWith(PN);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index d7e40e8ef978..b603bbe55dc9 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3781,10 +3781,44 @@ void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
// the incoming edges.
VPBasicBlock *Header =
State.Plan->getVectorLoopRegion()->getEntryBasicBlock();
+
+ // Gather all VPReductionPHIRecipe and sort them so that Intermediate stores
+ // sank outside of the loop would keep the same order as they had in the
+ // original loop.
+ SmallVector<VPReductionPHIRecipe *> ReductionPHIList;
for (VPRecipeBase &R : Header->phis()) {
if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
- fixReduction(ReductionPhi, State);
- else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
+ ReductionPHIList.emplace_back(ReductionPhi);
+ }
+ stable_sort(ReductionPHIList, [this](const VPReductionPHIRecipe *R1,
+ const VPReductionPHIRecipe *R2) {
+ auto *IS1 = R1->getRecurrenceDescriptor().IntermediateStore;
+ auto *IS2 = R2->getRecurrenceDescriptor().IntermediateStore;
+
+ // If neither of the recipes has an intermediate store, keep the order the
+ // same.
+ if (!IS1 && !IS2)
+ return false;
+
+ // If only one of the recipes has an intermediate store, then move it
+ // towards the beginning of the list.
+ if (IS1 && !IS2)
+ return true;
+
+ if (!IS1 && IS2)
+ return false;
+
+ // If both recipes have an intermediate store, then the recipe with the
+ // later store should be processed earlier. So it should go to the beginning
+ // of the list.
+ return DT->dominates(IS2, IS1);
+ });
+
+ for (VPReductionPHIRecipe *ReductionPhi : ReductionPHIList)
+ fixReduction(ReductionPhi, State);
+
+ for (VPRecipeBase &R : Header->phis()) {
+ if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
fixFixedOrderRecurrence(FOR, State);
}
}