aboutsummaryrefslogtreecommitdiff
path: root/llvm/include/llvm
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/include/llvm')
-rw-r--r--llvm/include/llvm/ADT/ScopedHashTable.h13
-rw-r--r--llvm/include/llvm/ADT/StringMap.h30
-rw-r--r--llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h8
-rw-r--r--llvm/include/llvm/Analysis/GlobalsModRef.h6
-rw-r--r--llvm/include/llvm/Analysis/InstSimplifyFolder.h17
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolution.h6
-rw-r--r--llvm/include/llvm/Analysis/TargetFolder.h53
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfo.h14
-rw-r--r--llvm/include/llvm/Analysis/TargetTransformInfoImpl.h4
-rw-r--r--llvm/include/llvm/Analysis/ValueTracking.h6
-rw-r--r--llvm/include/llvm/Analysis/VectorUtils.h6
-rw-r--r--llvm/include/llvm/AsmParser/LLToken.h5
-rw-r--r--llvm/include/llvm/BinaryFormat/ELF.h1
-rw-r--r--llvm/include/llvm/Bitcode/LLVMBitCodes.h5
-rw-r--r--llvm/include/llvm/Bitstream/BitstreamWriter.h4
-rw-r--r--llvm/include/llvm/CodeGen/AsmPrinter.h7
-rw-r--r--llvm/include/llvm/CodeGen/BasicTTIImpl.h2
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h4
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h21
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h3
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h41
-rw-r--r--llvm/include/llvm/CodeGen/ISDOpcodes.h7
-rw-r--r--llvm/include/llvm/CodeGen/MachineScheduler.h2
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGISel.h1
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAGNodes.h4
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h3
-rw-r--r--llvm/include/llvm/DWARFLinker/DWARFLinker.h3
-rw-r--r--llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h2
-rw-r--r--llvm/include/llvm/Debuginfod/Debuginfod.h83
-rw-r--r--llvm/include/llvm/Debuginfod/HTTPServer.h123
-rw-r--r--llvm/include/llvm/ExecutionEngine/JITLink/COFF.h39
-rw-r--r--llvm/include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h38
-rw-r--r--llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h14
-rw-r--r--llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h3
-rw-r--r--llvm/include/llvm/Frontend/OpenACC/ACC.td5
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h23
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMPKinds.def10
-rw-r--r--llvm/include/llvm/IR/Attributes.td4
-rw-r--r--llvm/include/llvm/IR/Constant.h4
-rw-r--r--llvm/include/llvm/IR/ConstantFolder.h50
-rw-r--r--llvm/include/llvm/IR/Constants.h36
-rw-r--r--llvm/include/llvm/IR/FixedMetadataKinds.def1
-rw-r--r--llvm/include/llvm/IR/GlobalValue.h48
-rw-r--r--llvm/include/llvm/IR/IRBuilder.h24
-rw-r--r--llvm/include/llvm/IR/IRBuilderFolder.h10
-rw-r--r--llvm/include/llvm/IR/InlineAsm.h9
-rw-r--r--llvm/include/llvm/IR/Instructions.h12
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td2
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAArch64.td43
-rw-r--r--llvm/include/llvm/IR/IntrinsicsAMDGPU.td2
-rw-r--r--llvm/include/llvm/IR/IntrinsicsDirectX.td10
-rw-r--r--llvm/include/llvm/IR/IntrinsicsX86.td6
-rw-r--r--llvm/include/llvm/IR/Metadata.h20
-rw-r--r--llvm/include/llvm/IR/Module.h4
-rw-r--r--llvm/include/llvm/IR/ModuleSummaryIndex.h2
-rw-r--r--llvm/include/llvm/IR/NoFolder.h18
-rw-r--r--llvm/include/llvm/IR/OptBisect.h4
-rw-r--r--llvm/include/llvm/LTO/Config.h6
-rw-r--r--llvm/include/llvm/MC/MCDwarf.h4
-rw-r--r--llvm/include/llvm/MC/MCSymbolWasm.h10
-rw-r--r--llvm/include/llvm/MC/MCSymbolXCOFF.h2
-rw-r--r--llvm/include/llvm/ObjCopy/CommonConfig.h1
-rw-r--r--llvm/include/llvm/Object/Decompressor.h4
-rw-r--r--llvm/include/llvm/Support/ARMTargetParser.def3
-rw-r--r--llvm/include/llvm/Support/Allocator.h21
-rw-r--r--llvm/include/llvm/Support/AllocatorBase.h22
-rw-r--r--llvm/include/llvm/Support/Casting.h4
-rw-r--r--llvm/include/llvm/Support/CodeGen.h7
-rw-r--r--llvm/include/llvm/Support/Compression.h23
-rw-r--r--llvm/include/llvm/Support/ConvertUTF.h2
-rw-r--r--llvm/include/llvm/Support/Error.h2
-rw-r--r--llvm/include/llvm/Support/TargetOpcodes.def2
-rw-r--r--llvm/include/llvm/Support/X86TargetParser.def1
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td2
-rw-r--r--llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td4
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td58
-rw-r--r--llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h2
-rw-r--r--llvm/include/llvm/Transforms/Utils/Debugify.h66
-rw-r--r--llvm/include/llvm/Transforms/Utils/LoopUtils.h8
-rw-r--r--llvm/include/llvm/Transforms/Utils/ModuleUtils.h3
-rw-r--r--llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h7
81 files changed, 886 insertions, 303 deletions
diff --git a/llvm/include/llvm/ADT/ScopedHashTable.h b/llvm/include/llvm/ADT/ScopedHashTable.h
index 48544961d095..78d4df7d5684 100644
--- a/llvm/include/llvm/ADT/ScopedHashTable.h
+++ b/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -147,7 +147,9 @@ public:
};
template <typename K, typename V, typename KInfo, typename AllocatorTy>
-class ScopedHashTable {
+class ScopedHashTable : detail::AllocatorHolder<AllocatorTy> {
+ using AllocTy = detail::AllocatorHolder<AllocatorTy>;
+
public:
/// ScopeTy - This is a helpful typedef that allows clients to get easy access
/// to the name of the scope for this hash table.
@@ -162,11 +164,9 @@ private:
DenseMap<K, ValTy*, KInfo> TopLevelMap;
ScopeTy *CurScope = nullptr;
- AllocatorTy Allocator;
-
public:
ScopedHashTable() = default;
- ScopedHashTable(AllocatorTy A) : Allocator(A) {}
+ ScopedHashTable(AllocatorTy A) : AllocTy(A) {}
ScopedHashTable(const ScopedHashTable &) = delete;
ScopedHashTable &operator=(const ScopedHashTable &) = delete;
@@ -175,8 +175,7 @@ public:
}
/// Access to the allocator.
- AllocatorTy &getAllocator() { return Allocator; }
- const AllocatorTy &getAllocator() const { return Allocator; }
+ using AllocTy::getAllocator;
/// Return 1 if the specified key is in the table, 0 otherwise.
size_type count(const K &Key) const {
@@ -217,7 +216,7 @@ public:
assert(S && "No scope active!");
ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
- Allocator);
+ getAllocator());
S->setLastValInScope(KeyEntry);
}
};
diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h
index 23248093c67e..81f2626eea72 100644
--- a/llvm/include/llvm/ADT/StringMap.h
+++ b/llvm/include/llvm/ADT/StringMap.h
@@ -107,8 +107,9 @@ public:
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
template <typename ValueTy, typename AllocatorTy = MallocAllocator>
-class StringMap : public StringMapImpl {
- AllocatorTy Allocator;
+class StringMap : public StringMapImpl,
+ private detail::AllocatorHolder<AllocatorTy> {
+ using AllocTy = detail::AllocatorHolder<AllocatorTy>;
public:
using MapEntryTy = StringMapEntry<ValueTy>;
@@ -119,12 +120,11 @@ public:
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(AllocatorTy A)
- : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {
- }
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), AllocTy(A) {}
StringMap(unsigned InitialSize, AllocatorTy A)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
- Allocator(A) {}
+ AllocTy(A) {}
StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
: StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
@@ -132,11 +132,11 @@ public:
}
StringMap(StringMap &&RHS)
- : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+ : StringMapImpl(std::move(RHS)), AllocTy(std::move(RHS.getAllocator())) {}
StringMap(const StringMap &RHS)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
- Allocator(RHS.Allocator) {
+ AllocTy(RHS.getAllocator()) {
if (RHS.empty())
return;
@@ -156,7 +156,7 @@ public:
}
TheTable[I] = MapEntryTy::Create(
- static_cast<MapEntryTy *>(Bucket)->getKey(), Allocator,
+ static_cast<MapEntryTy *>(Bucket)->getKey(), getAllocator(),
static_cast<MapEntryTy *>(Bucket)->getValue());
HashTable[I] = RHSHashTable[I];
}
@@ -171,7 +171,7 @@ public:
StringMap &operator=(StringMap RHS) {
StringMapImpl::swap(RHS);
- std::swap(Allocator, RHS.Allocator);
+ std::swap(getAllocator(), RHS.getAllocator());
return *this;
}
@@ -183,15 +183,14 @@ public:
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
- static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
+ static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
}
}
}
free(TheTable);
}
- AllocatorTy &getAllocator() { return Allocator; }
- const AllocatorTy &getAllocator() const { return Allocator; }
+ using AllocTy::getAllocator;
using key_type = const char *;
using mapped_type = ValueTy;
@@ -336,7 +335,8 @@ public:
if (Bucket == getTombstoneVal())
--NumTombstones;
- Bucket = MapEntryTy::Create(Key, Allocator, std::forward<ArgsTy>(Args)...);
+ Bucket =
+ MapEntryTy::Create(Key, getAllocator(), std::forward<ArgsTy>(Args)...);
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
@@ -354,7 +354,7 @@ public:
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *&Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
- static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
+ static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
}
Bucket = nullptr;
}
@@ -370,7 +370,7 @@ public:
void erase(iterator I) {
MapEntryTy &V = *I;
remove(&V);
- V.Destroy(Allocator);
+ V.Destroy(getAllocator());
}
bool erase(StringRef Key) {
diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index d8e524d7cb80..8addbde40c4f 100644
--- a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1278,9 +1278,9 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
}
LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
<< " has irr loop header weight "
- << HeaderWeight.getValue() << "\n");
+ << HeaderWeight.value() << "\n");
NumHeadersWithWeight++;
- uint64_t HeaderWeightValue = HeaderWeight.getValue();
+ uint64_t HeaderWeightValue = HeaderWeight.value();
if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
MinHeaderWeight = HeaderWeightValue;
if (HeaderWeightValue) {
@@ -1732,10 +1732,10 @@ raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
if (Optional<uint64_t> ProfileCount =
BlockFrequencyInfoImplBase::getBlockProfileCount(
F->getFunction(), getNode(&BB)))
- OS << ", count = " << ProfileCount.getValue();
+ OS << ", count = " << ProfileCount.value();
if (Optional<uint64_t> IrrLoopHeaderWeight =
BB.getIrrLoopHeaderWeight())
- OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.getValue();
+ OS << ", irr_loop_header_weight = " << IrrLoopHeaderWeight.value();
OS << "\n";
}
diff --git a/llvm/include/llvm/Analysis/GlobalsModRef.h b/llvm/include/llvm/Analysis/GlobalsModRef.h
index 4d8ed10bb18e..62095a1d6ad2 100644
--- a/llvm/include/llvm/Analysis/GlobalsModRef.h
+++ b/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -102,16 +102,12 @@ public:
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
AAQueryInfo &AAQI);
+ using AAResultBase::getModRefBehavior;
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
FunctionModRefBehavior getModRefBehavior(const Function *F);
- /// getModRefBehavior - Return the behavior of the specified function if
- /// called from the specified call site. The call site may be null in which
- /// case the most generic behavior of this function should be returned.
- FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
-
private:
FunctionInfo *getFunctionInfo(const Function *F);
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index d4ea7d73ec92..16bd9f765421 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -67,6 +67,11 @@ public:
return simplifyBinOp(Opc, LHS, RHS, FMF, SQ);
}
+ Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
+ FastMathFlags FMF) const override {
+ return simplifyUnOp(Opc, V, FMF, SQ);
+ }
+
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
return simplifyICmpInst(P, LHS, RHS, SQ);
}
@@ -108,18 +113,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Unary Operators
- //===--------------------------------------------------------------------===//
-
- Value *CreateFNeg(Constant *C) const override {
- return ConstFolder.CreateFNeg(C);
- }
-
- Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
- return ConstFolder.CreateUnOp(Opc, C);
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index de1cc299f062..5b49ab14286b 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -535,8 +535,10 @@ public:
/// Parse NSW/NUW flags from add/sub/mul IR binary operation \p Op into
/// SCEV no-wrap flags, and deduce flag[s] that aren't known yet.
- /// Does not mutate the original instruction.
- std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
+ /// Does not mutate the original instruction. Returns None if it could not
+ /// deduce more precise flags than the instruction already has, otherwise
+ /// returns proven flags.
+ Optional<SCEV::NoWrapFlags>
getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO);
/// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index 3a7218b10b97..c42577330e9b 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -55,8 +55,11 @@ public:
Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
- if (LC && RC)
- return Fold(ConstantExpr::get(Opc, LC, RC));
+ if (LC && RC) {
+ if (ConstantExpr::isDesirableBinOp(Opc))
+ return Fold(ConstantExpr::get(Opc, LC, RC));
+ return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
+ }
return nullptr;
}
@@ -64,9 +67,12 @@ public:
bool IsExact) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
- if (LC && RC)
- return Fold(ConstantExpr::get(
- Opc, LC, RC, IsExact ? PossiblyExactOperator::IsExact : 0));
+ if (LC && RC) {
+ if (ConstantExpr::isDesirableBinOp(Opc))
+ return Fold(ConstantExpr::get(
+ Opc, LC, RC, IsExact ? PossiblyExactOperator::IsExact : 0));
+ return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
+ }
return nullptr;
}
@@ -75,12 +81,15 @@ public:
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC) {
- unsigned Flags = 0;
- if (HasNUW)
- Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
- if (HasNSW)
- Flags |= OverflowingBinaryOperator::NoSignedWrap;
- return Fold(ConstantExpr::get(Opc, LC, RC, Flags));
+ if (ConstantExpr::isDesirableBinOp(Opc)) {
+ unsigned Flags = 0;
+ if (HasNUW)
+ Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
+ if (HasNSW)
+ Flags |= OverflowingBinaryOperator::NoSignedWrap;
+ return Fold(ConstantExpr::get(Opc, LC, RC, Flags));
+ }
+ return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
}
return nullptr;
}
@@ -89,11 +98,19 @@ public:
FastMathFlags FMF) const override {
return FoldBinOp(Opc, LHS, RHS);
}
+
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
- return ConstantExpr::getCompare(P, LC, RC);
+ return Fold(ConstantExpr::getCompare(P, LC, RC));
+ return nullptr;
+ }
+
+ Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
+ FastMathFlags FMF) const override {
+ if (Constant *C = dyn_cast<Constant>(V))
+ return Fold(ConstantExpr::get(Opc, C));
return nullptr;
}
@@ -165,18 +182,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Unary Operators
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFNeg(Constant *C) const override {
- return Fold(ConstantExpr::getFNeg(C));
- }
-
- Constant *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
- return Fold(ConstantExpr::get(Opc, C));
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 372f17cfc7ff..c64cb51cc08e 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -160,6 +160,8 @@ public:
bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};
+enum class PredicationStyle { None, Data, DataAndControlFlow };
+
class TargetTransformInfo;
typedef TargetTransformInfo TTI;
@@ -531,8 +533,12 @@ public:
const LoopAccessInfo *LAI) const;
/// Query the target whether lowering of the llvm.get.active.lane.mask
- /// intrinsic is supported.
- bool emitGetActiveLaneMask() const;
+ /// intrinsic is supported and how the mask should be used. A return value
+ /// of PredicationStyle::Data indicates the mask is used as data only,
+ /// whereas PredicationStyle::DataAndControlFlow indicates we should also use
+ /// the mask for control flow in the loop. If unsupported the return value is
+ /// PredicationStyle::None.
+ PredicationStyle emitGetActiveLaneMask() const;
// Parameters that control the loop peeling transformation
struct PeelingPreferences {
@@ -1553,7 +1559,7 @@ public:
preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
AssumptionCache &AC, TargetLibraryInfo *TLI,
DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
- virtual bool emitGetActiveLaneMask() = 0;
+ virtual PredicationStyle emitGetActiveLaneMask() = 0;
virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) = 0;
virtual Optional<Value *>
@@ -1932,7 +1938,7 @@ public:
const LoopAccessInfo *LAI) override {
return Impl.preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
- bool emitGetActiveLaneMask() override {
+ PredicationStyle emitGetActiveLaneMask() override {
return Impl.emitGetActiveLaneMask();
}
Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index a70c418974f5..af71fc9bffaf 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -167,8 +167,8 @@ public:
return false;
}
- bool emitGetActiveLaneMask() const {
- return false;
+ PredicationStyle emitGetActiveLaneMask() const {
+ return PredicationStyle::None;
}
Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 3b29bf1d53b4..7cb1a91d8c93 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -458,7 +458,7 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
- bool isSafeToSpeculativelyExecute(const Value *V,
+ bool isSafeToSpeculativelyExecute(const Instruction *I,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
@@ -481,8 +481,8 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// This behavior is a shortcoming in the current implementation and not
/// intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
- unsigned Opcode, const Operator *Inst, const Instruction *CtxI = nullptr,
- const DominatorTree *DT = nullptr,
+ unsigned Opcode, const Instruction *Inst,
+ const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// Returns true if the result or effects of the given instructions \p I
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 0005874ba040..fa0892788b43 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -236,10 +236,10 @@ class VFDatabase {
// ensuring that the variant described in the attribute has a
// corresponding definition or declaration of the vector
// function in the Module M.
- if (Shape && (Shape.getValue().ScalarName == ScalarName)) {
- assert(CI.getModule()->getFunction(Shape.getValue().VectorName) &&
+ if (Shape && (Shape.value().ScalarName == ScalarName)) {
+ assert(CI.getModule()->getFunction(Shape.value().VectorName) &&
"Vector function is missing.");
- Mappings.push_back(Shape.getValue());
+ Mappings.push_back(Shape.value());
}
}
}
diff --git a/llvm/include/llvm/AsmParser/LLToken.h b/llvm/include/llvm/AsmParser/LLToken.h
index 230a1662cc04..04235f0fdc4e 100644
--- a/llvm/include/llvm/AsmParser/LLToken.h
+++ b/llvm/include/llvm/AsmParser/LLToken.h
@@ -221,6 +221,8 @@ enum Kind {
kw_min,
kw_umax,
kw_umin,
+ kw_fmax,
+ kw_fmin,
// Instruction Opcodes (Opcode in UIntVal).
kw_fneg,
@@ -397,9 +399,6 @@ enum Kind {
// GV's with __attribute__((no_sanitize("hwaddress"))), or things in
// -fsanitize-ignorelist when built with HWASan.
kw_no_sanitize_hwaddress,
- // GV's with __attribute__((no_sanitize("memtag"))), or things in
- // -fsanitize-ignorelist when built with memory tagging.
- kw_no_sanitize_memtag,
// GV's where the clang++ frontend (when ASan is used) notes that this is
// dynamically initialized, and thus needs ODR detection.
kw_sanitize_address_dyninit,
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index 1e0ef613788d..1fd025761127 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -990,6 +990,7 @@ enum : unsigned {
// backward-compatibility).
SHT_LLVM_CALL_GRAPH_PROFILE = 0x6fff4c09, // LLVM Call Graph Profile.
SHT_LLVM_BB_ADDR_MAP = 0x6fff4c0a, // LLVM Basic Block Address Map.
+ SHT_LLVM_OFFLOADING = 0x6fff4c0b, // LLVM device offloading data.
// Android's experimental support for SHT_RELR sections.
// https://android.googlesource.com/platform/bionic/+/b7feec74547f84559a1467aca02708ff61346d2a/libc/include/elf.h#512
SHT_ANDROID_RELR = 0x6fffff00, // Relocation entries; only offsets.
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index 5d96204ba42a..eee4c50cc13b 100644
--- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -458,7 +458,9 @@ enum RMWOperations {
RMW_UMAX = 9,
RMW_UMIN = 10,
RMW_FADD = 11,
- RMW_FSUB = 12
+ RMW_FSUB = 12,
+ RMW_FMAX = 13,
+ RMW_FMIN = 14
};
/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
@@ -686,6 +688,7 @@ enum AttributeKindCodes {
ATTR_KIND_ALLOCATED_POINTER = 81,
ATTR_KIND_ALLOC_KIND = 82,
ATTR_KIND_PRESPLIT_COROUTINE = 83,
+ ATTR_KIND_FNRETTHUNK_EXTERN = 84,
};
enum ComdatSelectionKindCodes {
diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h
index be6bab5532bd..143f9ba17afe 100644
--- a/llvm/include/llvm/Bitstream/BitstreamWriter.h
+++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h
@@ -386,12 +386,12 @@ private:
const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
if (Op.isLiteral())
- EmitAbbreviatedLiteral(Op, Code.getValue());
+ EmitAbbreviatedLiteral(Op, Code.value());
else {
assert(Op.getEncoding() != BitCodeAbbrevOp::Array &&
Op.getEncoding() != BitCodeAbbrevOp::Blob &&
"Expected literal or scalar");
- EmitAbbreviatedField(Op, Code.getValue());
+ EmitAbbreviatedField(Op, Code.value());
}
}
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
index fb4627c029b0..5e900e9162d8 100644
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -41,6 +41,7 @@ class DIEAbbrev;
class DwarfDebug;
class GCMetadataPrinter;
class GCStrategy;
+class GlobalAlias;
class GlobalObject;
class GlobalValue;
class GlobalVariable;
@@ -474,7 +475,11 @@ public:
virtual const MCExpr *lowerConstant(const Constant *CV);
/// Print a general LLVM constant to the .s file.
- void emitGlobalConstant(const DataLayout &DL, const Constant *CV);
+ /// On AIX, when an alias refers to a sub-element of a global variable, the
+ /// label of that alias needs to be emitted before the corresponding element.
+ using AliasMapTy = DenseMap<uint64_t, SmallVector<const GlobalAlias *, 1>>;
+ void emitGlobalConstant(const DataLayout &DL, const Constant *CV,
+ AliasMapTy *AliasList = nullptr);
/// Unnamed constant global variables solely contaning a pointer to
/// another globals variable act like a global variable "proxy", or GOT
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 46be8e030406..b5b766ff03f1 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -607,7 +607,7 @@ public:
return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}
- bool emitGetActiveLaneMask() {
+ PredicationStyle emitGetActiveLaneMask() {
return BaseT::emitGetActiveLaneMask();
}
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 8ea45e576e4d..44ba81223ec3 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -196,6 +196,10 @@ enum {
/// - PredicateID - The ID of the predicate function to call
GIM_CheckCxxInsnPredicate,
+ /// Check if there's no use of the first result.
+ /// - InsnID - Instruction ID
+ GIM_CheckHasNoUse,
+
/// Check the type for the specified operand
/// - InsnID - Instruction ID
/// - OpIdx - Operand index
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index c06b33d11170..1229dfcb2c31 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -379,6 +379,25 @@ bool InstructionSelector::executeMatchTable(
return false;
break;
}
+ case GIM_CheckHasNoUse: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckHasNoUse(MIs["
+ << InsnID << "]\n");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Used insn before defined");
+ assert(MI->getNumDefs() > 0 && "No defs");
+ const Register Res = MI->getOperand(0).getReg();
+
+ if (!MRI.use_nodbg_empty(Res)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+
+ break;
+ }
case GIM_CheckAtomicOrdering: {
int64_t InsnID = MatchTable[CurrentIdx++];
AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
@@ -675,7 +694,7 @@ bool InstructionSelector::executeMatchTable(
(ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
State.MIs[InsnID]->getOperand(OpIdx));
if (Renderer)
- State.Renderers[RendererID] = Renderer.getValue();
+ State.Renderers[RendererID] = Renderer.value();
else
if (handleReject() == RejectAndGiveUp)
return false;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index c6c57ac07f0e..caa6346a40db 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -327,9 +327,6 @@ public:
LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx,
LLT NarrowTy);
- LegalizeResult fewerElementsVectorSextInReg(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
-
LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
LLT HalfTy, LLT ShiftAmtTy);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 16ba568c1be9..01fd5d94d371 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -683,6 +683,13 @@ public:
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op,
bool IsFP);
+ // Build and insert \p Res = G_SEXT_INREG \p Op, 1 or \p Res = G_AND \p Op, 1,
+ // or COPY depending on how the target wants to extend boolean values, using
+ // the original register size.
+ MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op,
+ bool IsVector,
+ bool IsFP);
+
/// Build and insert \p Res = G_ZEXT \p Op
///
/// G_ZEXT produces a register of the specified width, with bits 0 to
@@ -1401,6 +1408,40 @@ public:
const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
MachineMemOperand &MMO);
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the floating point maximum of
+ /// \p Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWFMax(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
+ /// Build and insert `OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO`.
+ ///
+ /// Atomically replace the value at \p Addr with the floating point minimum of
+ /// \p Val and the original value. Puts the original value from \p Addr in \p
+ /// OldValRes.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p OldValRes must be a generic virtual register.
+ /// \pre \p Addr must be a generic virtual register with pointer type.
+ /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
+ /// same type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildAtomicRMWFMin(
+ const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
+ MachineMemOperand &MMO);
+
/// Build and insert `G_FENCE Ordering, Scope`.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 120f89952a95..14bbcd24d04d 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1195,6 +1195,8 @@ enum NodeType {
ATOMIC_LOAD_UMAX,
ATOMIC_LOAD_FADD,
ATOMIC_LOAD_FSUB,
+ ATOMIC_LOAD_FMAX,
+ ATOMIC_LOAD_FMIN,
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
@@ -1285,6 +1287,11 @@ enum NodeType {
VECREDUCE_UMAX,
VECREDUCE_UMIN,
+ // The `llvm.experimental.stackmap` intrinsic.
+ // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
+ // Outputs: output chain, glue
+ STACKMAP,
+
// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
#include "llvm/IR/VPIntrinsics.def"
diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h
index 0554eb1ab77e..8000c9db428d 100644
--- a/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -103,8 +103,10 @@ extern cl::opt<bool> ForceBottomUp;
extern cl::opt<bool> VerifyScheduling;
#ifndef NDEBUG
extern cl::opt<bool> ViewMISchedDAGs;
+extern cl::opt<bool> PrintDAGs;
#else
extern const bool ViewMISchedDAGs;
+extern const bool PrintDAGs;
#endif
class AAResults;
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index 35fb0bc80593..87df6d1b1604 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -321,6 +321,7 @@ private:
void Select_FREEZE(SDNode *N);
void Select_ARITH_FENCE(SDNode *N);
+ void Select_STACKMAP(SDNode *N);
private:
void DoInstructionSelection();
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 5974f13a296b..c531ddf8e906 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1403,6 +1403,8 @@ public:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_LOAD_FADD:
case ISD::ATOMIC_LOAD_FSUB:
+ case ISD::ATOMIC_LOAD_FMAX:
+ case ISD::ATOMIC_LOAD_FMIN:
case ISD::ATOMIC_LOAD:
case ISD::ATOMIC_STORE:
case ISD::MLOAD:
@@ -1468,6 +1470,8 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 98b9a416ea59..ab5d3ba0164d 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -546,6 +546,9 @@ public:
return BypassSlowDivWidths;
}
+ /// Return true only if vscale must be a power of two.
+ virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
+
/// Return true if Flow Control is an expensive operation that should be
/// avoided.
bool isJumpExpensive() const { return JumpIsExpensive; }
diff --git a/llvm/include/llvm/DWARFLinker/DWARFLinker.h b/llvm/include/llvm/DWARFLinker/DWARFLinker.h
index 0b2e033bd97a..b2b2e2e873be 100644
--- a/llvm/include/llvm/DWARFLinker/DWARFLinker.h
+++ b/llvm/include/llvm/DWARFLinker/DWARFLinker.h
@@ -692,9 +692,6 @@ private:
bool getDIENames(const DWARFDie &Die, AttributesInfo &Info,
OffsetsStringPool &StringPool, bool StripTemplate = false);
- /// Create a copy of abbreviation Abbrev.
- void copyAbbrev(const DWARFAbbreviationDeclaration &Abbrev, bool hasODR);
-
uint32_t hashFullyQualifiedName(DWARFDie DIE, CompileUnit &U,
const DWARFFile &File,
int RecurseDepth = 0);
diff --git a/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h b/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
index 00c4bf0a615f..f233a183912b 100644
--- a/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
+++ b/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
@@ -243,6 +243,8 @@ private:
std::function<void()> Evictor;
};
+Optional<ArrayRef<uint8_t>> getBuildID(const ELFObjectFileBase *Obj);
+
} // end namespace symbolize
} // end namespace llvm
diff --git a/llvm/include/llvm/Debuginfod/Debuginfod.h b/llvm/include/llvm/Debuginfod/Debuginfod.h
index 064cfa75b1a1..496b24cfa37e 100644
--- a/llvm/include/llvm/Debuginfod/Debuginfod.h
+++ b/llvm/include/llvm/Debuginfod/Debuginfod.h
@@ -7,23 +7,32 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// This file contains the declarations of getCachedOrDownloadArtifact and
-/// several convenience functions for specific artifact types:
-/// getCachedOrDownloadSource, getCachedOrDownloadExecutable, and
-/// getCachedOrDownloadDebuginfo. This file also declares
-/// getDefaultDebuginfodUrls and getDefaultDebuginfodCacheDirectory.
-///
+/// This file contains several declarations for the debuginfod client and
+/// server. The client functions are getDefaultDebuginfodUrls,
+/// getCachedOrDownloadArtifact, and several convenience functions for specific
+/// artifact types: getCachedOrDownloadSource, getCachedOrDownloadExecutable,
+/// and getCachedOrDownloadDebuginfo. For the server, this file declares the
+/// DebuginfodLogEntry and DebuginfodServer structs, as well as the
+/// DebuginfodLog and DebuginfodCollection classes.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFOD_DEBUGINFOD_H
#define LLVM_DEBUGINFOD_DEBUGINFOD_H
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Debuginfod/HTTPServer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/RWMutex.h"
+#include "llvm/Support/Timer.h"
#include <chrono>
+#include <condition_variable>
+#include <queue>
namespace llvm {
@@ -68,6 +77,68 @@ Expected<std::string> getCachedOrDownloadArtifact(
StringRef UniqueKey, StringRef UrlPath, StringRef CacheDirectoryPath,
ArrayRef<StringRef> DebuginfodUrls, std::chrono::milliseconds Timeout);
+class ThreadPool;
+
+struct DebuginfodLogEntry {
+ std::string Message;
+ DebuginfodLogEntry() = default;
+ DebuginfodLogEntry(const Twine &Message);
+};
+
+class DebuginfodLog {
+ std::mutex QueueMutex;
+ std::condition_variable QueueCondition;
+ std::queue<DebuginfodLogEntry> LogEntryQueue;
+
+public:
+ // Adds a log entry to end of the queue.
+ void push(DebuginfodLogEntry Entry);
+ // Adds a log entry to end of the queue.
+ void push(const Twine &Message);
+ // Blocks until there are log entries in the queue, then pops and returns the
+ // first one.
+ DebuginfodLogEntry pop();
+};
+
+/// Tracks a collection of debuginfod artifacts on the local filesystem.
+class DebuginfodCollection {
+ SmallVector<std::string, 1> Paths;
+ sys::RWMutex BinariesMutex;
+ StringMap<std::string> Binaries;
+ sys::RWMutex DebugBinariesMutex;
+ StringMap<std::string> DebugBinaries;
+ Error findBinaries(StringRef Path);
+ Expected<Optional<std::string>> getDebugBinaryPath(BuildIDRef);
+ Expected<Optional<std::string>> getBinaryPath(BuildIDRef);
+ // If the collection has not been updated since MinInterval, call update() and
+ // return true. Otherwise return false. If update returns an error, return the
+ // error.
+ Expected<bool> updateIfStale();
+ DebuginfodLog &Log;
+ ThreadPool &Pool;
+ Timer UpdateTimer;
+ sys::Mutex UpdateMutex;
+
+ // Minimum update interval, in seconds, for on-demand updates triggered when a
+ // build-id is not found.
+ double MinInterval;
+
+public:
+ DebuginfodCollection(ArrayRef<StringRef> Paths, DebuginfodLog &Log,
+ ThreadPool &Pool, double MinInterval);
+ Error update();
+ Error updateForever(std::chrono::milliseconds Interval);
+ Expected<std::string> findDebugBinaryPath(BuildIDRef);
+ Expected<std::string> findBinaryPath(BuildIDRef);
+};
+
+struct DebuginfodServer {
+ HTTPServer Server;
+ DebuginfodLog &Log;
+ DebuginfodCollection &Collection;
+ DebuginfodServer(DebuginfodLog &Log, DebuginfodCollection &Collection);
+};
+
} // end namespace llvm
#endif
diff --git a/llvm/include/llvm/Debuginfod/HTTPServer.h b/llvm/include/llvm/Debuginfod/HTTPServer.h
new file mode 100644
index 000000000000..410ba32b3f2e
--- /dev/null
+++ b/llvm/include/llvm/Debuginfod/HTTPServer.h
@@ -0,0 +1,123 @@
+//===-- llvm/Debuginfod/HTTPServer.h - HTTP server library ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of the HTTPServer and HTTPServerRequest
+/// classes, the HTTPResponse, and StreamingHTTPResponse structs, and the
+/// streamFile function.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_HTTP_SERVER_H
+#define LLVM_SUPPORT_HTTP_SERVER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+#ifdef LLVM_ENABLE_HTTPLIB
+// forward declarations
+namespace httplib {
+class Request;
+class Response;
+class Server;
+} // namespace httplib
+#endif
+
+namespace llvm {
+
+struct HTTPResponse;
+struct StreamingHTTPResponse;
+class HTTPServer;
+
+class HTTPServerRequest {
+ friend HTTPServer;
+
+#ifdef LLVM_ENABLE_HTTPLIB
+private:
+ HTTPServerRequest(const httplib::Request &HTTPLibRequest,
+ httplib::Response &HTTPLibResponse);
+ httplib::Response &HTTPLibResponse;
+#endif
+
+public:
+ std::string UrlPath;
+ /// The elements correspond to match groups in the url path matching regex.
+ SmallVector<std::string, 1> UrlPathMatches;
+
+ // TODO bring in HTTP headers
+
+ void setResponse(StreamingHTTPResponse Response);
+ void setResponse(HTTPResponse Response);
+};
+
+struct HTTPResponse {
+ unsigned Code;
+ const char *ContentType;
+ StringRef Body;
+};
+
+typedef std::function<void(HTTPServerRequest &)> HTTPRequestHandler;
+
+/// An HTTPContentProvider is called by the HTTPServer to obtain chunks of the
+/// streaming response body. The returned chunk should be located at Offset
+/// bytes and have Length bytes.
+typedef std::function<StringRef(size_t /*Offset*/, size_t /*Length*/)>
+ HTTPContentProvider;
+
+/// Wraps the content provider with HTTP Status code and headers.
+struct StreamingHTTPResponse {
+ unsigned Code;
+ const char *ContentType;
+ size_t ContentLength;
+ HTTPContentProvider Provider;
+ /// Called after the response transfer is complete with the success value of
+ /// the transfer.
+ std::function<void(bool)> CompletionHandler = [](bool Success) {};
+};
+
+/// Sets the response to stream the file at FilePath, if available, and
+/// otherwise an HTTP 404 error response.
+bool streamFile(HTTPServerRequest &Request, StringRef FilePath);
+
+/// An HTTP server which can listen on a single TCP/IP port for HTTP
+/// requests and delegate them to the appropriate registered handler.
+class HTTPServer {
+#ifdef LLVM_ENABLE_HTTPLIB
+ std::unique_ptr<httplib::Server> Server;
+ unsigned Port = 0;
+#endif
+public:
+ HTTPServer();
+ ~HTTPServer();
+
+ /// Returns true only if LLVM has been compiled with a working HTTPServer.
+ static bool isAvailable();
+
+ /// Registers a URL pattern routing rule. When the server is listening, each
+ /// request is dispatched to the first registered handler whose UrlPathPattern
+ /// matches the UrlPath.
+ Error get(StringRef UrlPathPattern, HTTPRequestHandler Handler);
+
+ /// Attempts to assign the requested port and interface, returning an Error
+ /// upon failure.
+ Error bind(unsigned Port, const char *HostInterface = "0.0.0.0");
+
+ /// Attempts to assign any available port and interface, returning either the
+ /// port number or an Error upon failure.
+ Expected<unsigned> bind(const char *HostInterface = "0.0.0.0");
+
+ /// Attempts to listen for requests on the bound port. Returns an Error if
+ /// called before binding a port.
+ Error listen();
+
+ /// If the server is listening, stop and unbind the socket.
+ void stop();
+};
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_HTTP_SERVER_H
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/COFF.h b/llvm/include/llvm/ExecutionEngine/JITLink/COFF.h
new file mode 100644
index 000000000000..87d3648d37e8
--- /dev/null
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/COFF.h
@@ -0,0 +1,39 @@
+//===------- COFF.h - Generic JIT link function for COFF ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic jit-link functions for COFF.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_COFF_H
+#define LLVM_EXECUTIONENGINE_JITLINK_COFF_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// Create a LinkGraph from a COFF relocatable object.
+///
+/// Note: The graph does not take ownership of the underlying buffer, nor copy
+/// its contents. The caller is responsible for ensuring that the object buffer
+/// outlives the graph.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromCOFFObject(MemoryBufferRef ObjectBuffer);
+
+/// Link the given graph.
+///
+/// Uses conservative defaults for GOT and stub handling based on the target
+/// platform.
+void link_COFF(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_COFF_H
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h b/llvm/include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h
new file mode 100644
index 000000000000..fff32d6d9609
--- /dev/null
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/COFF_x86_64.h
@@ -0,0 +1,38 @@
+//===--- COFF_x86_64.h - JIT link functions for COFF/x86-64 ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// jit-link functions for COFF/x86-64.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H
+#define LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// Create a LinkGraph from a COFF/x86-64 relocatable object.
+///
+/// Note: The graph does not take ownership of the underlying buffer, nor copy
+/// its contents. The caller is responsible for ensuring that the object buffer
+/// outlives the graph.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromCOFFObject_x86_64(MemoryBufferRef ObjectBuffer);
+
+/// jit-link the given object buffer, which must be a COFF x86-64 object file.
+void link_COFF_x86_64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx);
+
+/// Return the string name of the given COFF x86-64 edge kind.
+const char *getCOFFX86RelocationKindName(Edge::Kind R);
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H
diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
index 53ff6c7a219e..f2c3fba7bcde 100644
--- a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
+++ b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h
@@ -33,7 +33,9 @@ enum EdgeKind_aarch64 : Edge::Kind {
GOTPageOffset12,
TLVPage21,
TLVPageOffset12,
- PointerToGOT,
+ TLSDescPage21,
+ TLSDescPageOffset12,
+ Delta32ToGOT,
PairedAddend,
LDRLiteral19,
Delta32,
@@ -223,10 +225,12 @@ inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
break;
}
case TLVPage21:
- case GOTPage21:
case TLVPageOffset12:
+ case TLSDescPage21:
+ case TLSDescPageOffset12:
+ case GOTPage21:
case GOTPageOffset12:
- case PointerToGOT: {
+ case Delta32ToGOT: {
return make_error<JITLinkError>(
"In graph " + G.getName() + ", section " + B.getSection().getName() +
"GOT/TLV edge kinds not lowered: " + getEdgeKindName(E.getKind()));
@@ -273,8 +277,8 @@ public:
"RawInstr isn't a 64-bit LDR immediate");
break;
}
- case aarch64::PointerToGOT: {
- KindToSet = aarch64::Delta64;
+ case aarch64::Delta32ToGOT: {
+ KindToSet = aarch64::Delta32;
break;
}
default:
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h b/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
index 141dd73548c8..57ffe250a19d 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/MachOPlatform.h
@@ -138,9 +138,6 @@ private:
using InitSymbolDepMap =
DenseMap<MaterializationResponsibility *, JITLinkSymbolSet>;
- void addEHAndTLVSupportPasses(MaterializationResponsibility &MR,
- jitlink::PassConfiguration &Config);
-
Error associateJITDylibHeaderSymbol(jitlink::LinkGraph &G,
MaterializationResponsibility &MR);
diff --git a/llvm/include/llvm/Frontend/OpenACC/ACC.td b/llvm/include/llvm/Frontend/OpenACC/ACC.td
index c68330665b02..45d815894454 100644
--- a/llvm/include/llvm/Frontend/OpenACC/ACC.td
+++ b/llvm/include/llvm/Frontend/OpenACC/ACC.td
@@ -128,10 +128,8 @@ def ACCC_DeviceResident : Clause<"device_resident"> {
// 2.4
def ACCC_DeviceType : Clause<"device_type"> {
- let flangClass = "ScalarIntExpr";
+ let flangClass = "AccDeviceTypeExprList";
let defaultValue = "*";
- let isValueOptional = true;
- let isValueList = true;
}
// 2.6.6
@@ -218,6 +216,7 @@ def ACCC_Reduction : Clause<"reduction"> {
// 2.5.6
def ACCC_Self : Clause<"self"> {
let flangClass = "AccSelfClause";
+ let isValueOptional = true;
}
// 2.9.5
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index 8a6b1c7d412d..3dfcabffb58a 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -599,9 +599,9 @@ public:
/// Add metadata to simd-ize a loop.
///
- /// \param DL Debug location for instructions added by unrolling.
- /// \param Loop The loop to simd-ize.
- void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
+ /// \param Loop The loop to simd-ize.
+ /// \param Simdlen The Simdlen length to apply to the simd loop.
+ void applySimd(CanonicalLoopInfo *Loop, ConstantInt *Simdlen);
/// Generator for '#omp flush'
///
@@ -821,6 +821,23 @@ public:
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
+ /// Generate a target region entry call.
+ ///
+ /// \param Loc The location at which the request originated and is fulfilled.
+ /// \param Return Return value of the created function returned by reference.
+ /// \param DeviceID Identifier for the device via the 'device' clause.
+  /// \param NumTeams Number of teams for the region via the 'num_teams' clause
+ /// or 0 if unspecified and -1 if there is no 'teams' clause.
+ /// \param NumThreads Number of threads via the 'thread_limit' clause.
+ /// \param HostPtr Pointer to the host-side pointer of the target kernel.
+ /// \param KernelArgs Array of arguments to the kernel.
+  /// \param NoWaitArgs Optional array of arguments to the nowait kernel.
+ InsertPointTy emitTargetKernel(const LocationDescription &Loc, Value *&Return,
+ Value *Ident, Value *DeviceID, Value *NumTeams,
+ Value *NumThreads, Value *HostPtr,
+ ArrayRef<Value *> KernelArgs,
+ ArrayRef<Value *> NoWaitArgs = {});
+
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
index 14aa53a6b08d..9d1ab57729b7 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPKinds.def
@@ -88,6 +88,8 @@ __OMP_ARRAY_TYPE(KmpCriticalName, Int32, 8)
__OMP_STRUCT_TYPE(Ident, ident_t, Int32, Int32, Int32, Int32, Int8Ptr)
__OMP_STRUCT_TYPE(OffloadEntry, __tgt_offload_entry, Int8Ptr, Int8Ptr, SizeTy,
Int32, Int32)
+__OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, Int32, Int32, VoidPtrPtr,
+ VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr, Int64)
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, Int8Ptr)
#undef __OMP_STRUCT_TYPE
@@ -412,6 +414,10 @@ __OMP_RTL(__tgt_target_teams_mapper, false, Int32, IdentPtr, Int64, VoidPtr, Int
__OMP_RTL(__tgt_target_teams_nowait_mapper, false, Int32, IdentPtr, Int64,
VoidPtr, Int32, VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr,
VoidPtrPtr, VoidPtrPtr, Int32, Int32, Int32, VoidPtr, Int32, VoidPtr)
+__OMP_RTL(__tgt_target_kernel, false, Int32, IdentPtr, Int64, Int32, Int32,
+ VoidPtr, KernelArgsPtr)
+__OMP_RTL(__tgt_target_kernel_nowait, false, Int32, IdentPtr, Int64, Int32,
+ Int32, VoidPtr, KernelArgsPtr, Int32, VoidPtr, Int32, VoidPtr)
__OMP_RTL(__tgt_register_requires, false, Void, Int64)
__OMP_RTL(__tgt_target_data_begin_mapper, false, Void, IdentPtr, Int64, Int32, VoidPtrPtr,
VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
@@ -937,6 +943,10 @@ __OMP_RTL_ATTRS(__tgt_target_teams_mapper, ForkAttrs, AttributeSet(),
ParamAttrs())
__OMP_RTL_ATTRS(__tgt_target_teams_nowait_mapper, ForkAttrs, AttributeSet(),
ParamAttrs())
+__OMP_RTL_ATTRS(__tgt_target_kernel, ForkAttrs, AttributeSet(),
+ ParamAttrs())
+__OMP_RTL_ATTRS(__tgt_target_kernel_nowait, ForkAttrs, AttributeSet(),
+ ParamAttrs())
__OMP_RTL_ATTRS(__tgt_register_requires, ForkAttrs, AttributeSet(),
ParamAttrs())
__OMP_RTL_ATTRS(__tgt_target_data_begin_mapper, ForkAttrs, AttributeSet(),
diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td
index 7b955b40b0a8..ea4bf80205f8 100644
--- a/llvm/include/llvm/IR/Attributes.td
+++ b/llvm/include/llvm/IR/Attributes.td
@@ -102,6 +102,10 @@ def DisableSanitizerInstrumentation: EnumAttr<"disable_sanitizer_instrumentation
/// Provide pointer element type to intrinsic.
def ElementType : TypeAttr<"elementtype", [ParamAttr]>;
+/// Whether to keep return instructions, or replace with a jump to an external
+/// symbol.
+def FnRetThunkExtern : EnumAttr<"fn_ret_thunk_extern", [FnAttr]>;
+
/// Function may only access memory that is inaccessible from IR.
def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly", [FnAttr]>;
diff --git a/llvm/include/llvm/IR/Constant.h b/llvm/include/llvm/IR/Constant.h
index a97372ebbad2..09fb2c98bff4 100644
--- a/llvm/include/llvm/IR/Constant.h
+++ b/llvm/include/llvm/IR/Constant.h
@@ -115,10 +115,6 @@ public:
/// any constant expressions.
bool containsConstantExpression() const;
- /// Return true if evaluation of this constant could trap. This is true for
- /// things like constant expressions that could divide by zero.
- bool canTrap() const;
-
/// Return true if the value can vary between threads.
bool isThreadDependent() const;
diff --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index 5e7ddb9aa673..bd28ff87965d 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -44,8 +44,11 @@ public:
Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
- if (LC && RC)
- return ConstantExpr::get(Opc, LC, RC);
+ if (LC && RC) {
+ if (ConstantExpr::isDesirableBinOp(Opc))
+ return ConstantExpr::get(Opc, LC, RC);
+ return ConstantFoldBinaryInstruction(Opc, LC, RC);
+ }
return nullptr;
}
@@ -53,9 +56,12 @@ public:
bool IsExact) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
- if (LC && RC)
- return ConstantExpr::get(Opc, LC, RC,
- IsExact ? PossiblyExactOperator::IsExact : 0);
+ if (LC && RC) {
+ if (ConstantExpr::isDesirableBinOp(Opc))
+ return ConstantExpr::get(Opc, LC, RC,
+ IsExact ? PossiblyExactOperator::IsExact : 0);
+ return ConstantFoldBinaryInstruction(Opc, LC, RC);
+ }
return nullptr;
}
@@ -64,12 +70,15 @@ public:
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC) {
- unsigned Flags = 0;
- if (HasNUW)
- Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
- if (HasNSW)
- Flags |= OverflowingBinaryOperator::NoSignedWrap;
- return ConstantExpr::get(Opc, LC, RC, Flags);
+ if (ConstantExpr::isDesirableBinOp(Opc)) {
+ unsigned Flags = 0;
+ if (HasNUW)
+ Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
+ if (HasNSW)
+ Flags |= OverflowingBinaryOperator::NoSignedWrap;
+ return ConstantExpr::get(Opc, LC, RC, Flags);
+ }
+ return ConstantFoldBinaryInstruction(Opc, LC, RC);
}
return nullptr;
}
@@ -79,6 +88,13 @@ public:
return FoldBinOp(Opc, LHS, RHS);
}
+ Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
+ FastMathFlags FMF) const override {
+ if (Constant *C = dyn_cast<Constant>(V))
+ return ConstantExpr::get(Opc, C);
+ return nullptr;
+ }
+
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
@@ -155,18 +171,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Unary Operators
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFNeg(Constant *C) const override {
- return ConstantExpr::getFNeg(C);
- }
-
- Constant *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
- return ConstantExpr::get(Opc, C);
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index b5445ff71b74..c50dff43dc74 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -1017,19 +1017,10 @@ public:
static Constant *getNot(Constant *C);
static Constant *getAdd(Constant *C1, Constant *C2, bool HasNUW = false,
bool HasNSW = false);
- static Constant *getFAdd(Constant *C1, Constant *C2);
static Constant *getSub(Constant *C1, Constant *C2, bool HasNUW = false,
bool HasNSW = false);
- static Constant *getFSub(Constant *C1, Constant *C2);
static Constant *getMul(Constant *C1, Constant *C2, bool HasNUW = false,
bool HasNSW = false);
- static Constant *getFMul(Constant *C1, Constant *C2);
- static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false);
- static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false);
- static Constant *getFDiv(Constant *C1, Constant *C2);
- static Constant *getURem(Constant *C1, Constant *C2);
- static Constant *getSRem(Constant *C1, Constant *C2);
- static Constant *getFRem(Constant *C1, Constant *C2);
static Constant *getAnd(Constant *C1, Constant *C2);
static Constant *getOr(Constant *C1, Constant *C2);
static Constant *getXor(Constant *C1, Constant *C2);
@@ -1093,14 +1084,6 @@ public:
return getShl(C1, C2, true, false);
}
- static Constant *getExactSDiv(Constant *C1, Constant *C2) {
- return getSDiv(C1, C2, true);
- }
-
- static Constant *getExactUDiv(Constant *C1, Constant *C2) {
- return getUDiv(C1, C2, true);
- }
-
static Constant *getExactAShr(Constant *C1, Constant *C2) {
return getAShr(C1, C2, true);
}
@@ -1201,10 +1184,6 @@ public:
/// Return true if this is a compare constant expression
bool isCompare() const;
- /// Return true if this is an insertvalue or extractvalue expression,
- /// and the getIndices() method may be used.
- bool hasIndices() const;
-
/// Select constant expr
///
/// \param OnlyIfReducedTy see \a getWithOperands() docs.
@@ -1294,9 +1273,6 @@ public:
static Constant *getShuffleVector(Constant *V1, Constant *V2,
ArrayRef<int> Mask,
Type *OnlyIfReducedTy = nullptr);
- static Constant *getInsertValue(Constant *Agg, Constant *Val,
- ArrayRef<unsigned> Idxs,
- Type *OnlyIfReducedTy = nullptr);
/// Return the opcode at the root of this constant expression
unsigned getOpcode() const { return getSubclassDataFromValue(); }
@@ -1305,10 +1281,6 @@ public:
/// FCMP constant expression.
unsigned getPredicate() const;
- /// Assert that this is an insertvalue or exactvalue
- /// expression and return the list of indices.
- ArrayRef<unsigned> getIndices() const;
-
/// Assert that this is a shufflevector and return the mask. See class
/// ShuffleVectorInst for a description of the mask representation.
ArrayRef<int> getShuffleMask() const;
@@ -1352,6 +1324,14 @@ public:
/// would make it harder to remove ConstantExprs altogether.
Instruction *getAsInstruction(Instruction *InsertBefore = nullptr) const;
+ /// Whether creating a constant expression for this binary operator is
+ /// desirable.
+ static bool isDesirableBinOp(unsigned Opcode);
+
+ /// Whether creating a constant expression for this binary operator is
+ /// supported.
+ static bool isSupportedBinOp(unsigned Opcode);
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == ConstantExprVal;
diff --git a/llvm/include/llvm/IR/FixedMetadataKinds.def b/llvm/include/llvm/IR/FixedMetadataKinds.def
index 7c32c5d13760..1d24f527df7b 100644
--- a/llvm/include/llvm/IR/FixedMetadataKinds.def
+++ b/llvm/include/llvm/IR/FixedMetadataKinds.def
@@ -44,3 +44,4 @@ LLVM_FIXED_MD_KIND(MD_noundef, "noundef", 29)
LLVM_FIXED_MD_KIND(MD_annotation, "annotation", 30)
LLVM_FIXED_MD_KIND(MD_nosanitize, "nosanitize", 31)
LLVM_FIXED_MD_KIND(MD_func_sanitize, "func_sanitize", 32)
+LLVM_FIXED_MD_KIND(MD_exclude, "exclude", 33)
diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h
index a17423dd965b..06702d3cdf6b 100644
--- a/llvm/include/llvm/IR/GlobalValue.h
+++ b/llvm/include/llvm/IR/GlobalValue.h
@@ -295,26 +295,38 @@ public:
void setPartition(StringRef Part);
// ASan, HWASan and Memtag sanitizers have some instrumentation that applies
- // specifically to global variables. This instrumentation is implicitly
- // applied to all global variables when built with -fsanitize=*. What we need
- // is a way to persist the information that a certain global variable should
- // *not* have sanitizers applied, which occurs if:
- // 1. The global variable is in the sanitizer ignore list, or
- // 2. The global variable is created by the sanitizers itself for internal
- // usage, or
- // 3. The global variable has __attribute__((no_sanitize("..."))) or
- // __attribute__((disable_sanitizer_instrumentation)).
- //
- // This is important, a some IR passes like GlobalMerge can delete global
- // variables and replace them with new ones. If the old variables were marked
- // to be unsanitized, then the new ones should also be.
+ // specifically to global variables.
struct SanitizerMetadata {
SanitizerMetadata()
- : NoAddress(false), NoHWAddress(false), NoMemtag(false),
- IsDynInit(false) {}
+ : NoAddress(false), NoHWAddress(false),
+ Memtag(false), IsDynInit(false) {}
+ // For ASan and HWASan, this instrumentation is implicitly applied to all
+ // global variables when built with -fsanitize=*. What we need is a way to
+ // persist the information that a certain global variable should *not* have
+ // sanitizers applied, which occurs if:
+ // 1. The global variable is in the sanitizer ignore list, or
+ // 2. The global variable is created by the sanitizers itself for internal
+ // usage, or
+ // 3. The global variable has __attribute__((no_sanitize("..."))) or
+ // __attribute__((disable_sanitizer_instrumentation)).
+ //
+  // This is important, as some IR passes like GlobalMerge can delete global
+ // variables and replace them with new ones. If the old variables were
+ // marked to be unsanitized, then the new ones should also be.
unsigned NoAddress : 1;
unsigned NoHWAddress : 1;
- unsigned NoMemtag : 1;
+
+ // Memtag sanitization works differently: sanitization is requested by clang
+ // when `-fsanitize=memtag-globals` is provided, and the request can be
+ // denied (and the attribute removed) by the AArch64 global tagging pass if
+ // it can't be fulfilled (e.g. the global variable is a TLS variable).
+ // Memtag sanitization has to interact with other parts of LLVM (like
+  // suppressing certain optimisations, emitting assembly directives, or
+ // creating special relocation sections).
+ //
+ // Use `GlobalValue::isTagged()` to check whether tagging should be enabled
+ // for a global variable.
+ unsigned Memtag : 1;
// ASan-specific metadata. Is this global variable dynamically initialized
// (from a C++ language perspective), and should therefore be checked for
@@ -331,6 +343,10 @@ public:
void setSanitizerMetadata(SanitizerMetadata Meta);
void removeSanitizerMetadata();
+ bool isTagged() const {
+ return hasSanitizerMetadata() && getSanitizerMetadata().Memtag;
+ }
+
static LinkageTypes getLinkOnceLinkage(bool ODR) {
return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
}
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index d8f08934b3d6..cec26e966b5c 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1162,11 +1162,11 @@ private:
RoundingMode UseRounding = DefaultConstrainedRounding;
if (Rounding)
- UseRounding = Rounding.getValue();
+ UseRounding = Rounding.value();
Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding);
assert(RoundingStr && "Garbage strict rounding mode!");
- auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
+ auto *RoundingMDS = MDString::get(Context, RoundingStr.value());
return MetadataAsValue::get(Context, RoundingMDS);
}
@@ -1175,11 +1175,11 @@ private:
fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
if (Except)
- UseExcept = Except.getValue();
+ UseExcept = Except.value();
Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept);
assert(ExceptStr && "Garbage strict exception behavior!");
- auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
+ auto *ExceptMDS = MDString::get(Context, ExceptStr.value());
return MetadataAsValue::get(Context, ExceptMDS);
}
@@ -1588,8 +1588,8 @@ public:
Value *CreateFNeg(Value *V, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
- if (auto *VC = dyn_cast<Constant>(V))
- return Insert(Folder.CreateFNeg(VC), Name);
+ if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
+ return Res;
return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
Name);
}
@@ -1598,10 +1598,10 @@ public:
/// default FMF.
Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
const Twine &Name = "") {
- if (auto *VC = dyn_cast<Constant>(V))
- return Insert(Folder.CreateFNeg(VC), Name);
- return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
- FMFSource->getFastMathFlags()),
+ FastMathFlags FMF = FMFSource->getFastMathFlags();
+ if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
+ return Res;
+ return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, FMF),
Name);
}
@@ -1612,8 +1612,8 @@ public:
Value *CreateUnOp(Instruction::UnaryOps Opc,
Value *V, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
- if (auto *VC = dyn_cast<Constant>(V))
- return Insert(Folder.CreateUnOp(Opc, VC), Name);
+ if (Value *Res = Folder.FoldUnOpFMF(Opc, V, FMF))
+ return Res;
Instruction *UnOp = UnaryOperator::Create(Opc, V);
if (isa<FPMathOperator>(UnOp))
setFPAttrs(UnOp, FPMathTag, FMF);
diff --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
index 9505f1e3be2a..b2b27235a1e6 100644
--- a/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -45,6 +45,9 @@ public:
virtual Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS,
Value *RHS, FastMathFlags FMF) const = 0;
+ virtual Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
+ FastMathFlags FMF) const = 0;
+
virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
Value *RHS) const = 0;
@@ -68,13 +71,6 @@ public:
ArrayRef<int> Mask) const = 0;
//===--------------------------------------------------------------------===//
- // Unary Operators
- //===--------------------------------------------------------------------===//
-
- virtual Value *CreateFNeg(Constant *C) const = 0;
- virtual Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const = 0;
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h
index 57f2da27e04e..032a70efdceb 100644
--- a/llvm/include/llvm/IR/InlineAsm.h
+++ b/llvm/include/llvm/IR/InlineAsm.h
@@ -24,6 +24,7 @@
namespace llvm {
+class Error;
class FunctionType;
class PointerType;
template <class ConstantClass> class ConstantUniqueMap;
@@ -83,11 +84,9 @@ public:
const std::string &getAsmString() const { return AsmString; }
const std::string &getConstraintString() const { return Constraints; }
- /// Verify - This static method can be used by the parser to check to see if
- /// the specified constraint string is legal for the type. This returns true
- /// if legal, false if not.
- ///
- static bool Verify(FunctionType *Ty, StringRef Constraints);
+ /// This static method can be used by the parser to check to see if the
+ /// specified constraint string is legal for the type.
+ static Error verify(FunctionType *Ty, StringRef Constraints);
// Constraint String Parsing
enum ConstraintPrefix {
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index d152e86488e1..a14bc39cea65 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -753,8 +753,16 @@ public:
/// *p = old - v
FSub,
+ /// *p = maxnum(old, v)
+ /// \p maxnum matches the behavior of \p llvm.maxnum.*.
+ FMax,
+
+ /// *p = minnum(old, v)
+ /// \p minnum matches the behavior of \p llvm.minnum.*.
+ FMin,
+
FIRST_BINOP = Xchg,
- LAST_BINOP = FSub,
+ LAST_BINOP = FMin,
BAD_BINOP
};
@@ -797,6 +805,8 @@ public:
switch (Op) {
case AtomicRMWInst::FAdd:
case AtomicRMWInst::FSub:
+ case AtomicRMWInst::FMax:
+ case AtomicRMWInst::FMin:
return true;
default:
return false;
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0dceea13ea36..8bf8e9ca76ad 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1308,7 +1308,7 @@ def int_coro_noop : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
def int_coro_align : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
-def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
+def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], [IntrNoMerge]>;
def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
def int_coro_suspend_retcon : Intrinsic<[llvm_any_ty], [llvm_vararg_ty], []>;
def int_coro_prepare_retcon : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 1256ab2c9f84..fc66bdfc35e0 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -931,6 +931,7 @@ def int_aarch64_st64bv0: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], dat
}
+def llvm_nxv1i1_ty : LLVMType<nxv1i1>;
def llvm_nxv2i1_ty : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty : LLVMType<nxv8i1>;
@@ -2592,27 +2593,27 @@ let TargetPrefix = "aarch64" in {
// Loads
def int_aarch64_sme_ld1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
+ def int_aarch64_sme_ld1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
+ def int_aarch64_sme_ld1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
+ def int_aarch64_sme_ld1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
+ def int_aarch64_sme_ld1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
def int_aarch64_sme_ld1b_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1h_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1w_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1d_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_ld1q_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
+ def int_aarch64_sme_ld1h_vert : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
+ def int_aarch64_sme_ld1w_vert : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
+ def int_aarch64_sme_ld1d_vert : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
+ def int_aarch64_sme_ld1q_vert : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
// Stores
def int_aarch64_sme_st1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
+ def int_aarch64_sme_st1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
+ def int_aarch64_sme_st1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
+ def int_aarch64_sme_st1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
+ def int_aarch64_sme_st1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
def int_aarch64_sme_st1b_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1h_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1w_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1d_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
- def int_aarch64_sme_st1q_vert : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
+ def int_aarch64_sme_st1h_vert : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
+ def int_aarch64_sme_st1w_vert : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
+ def int_aarch64_sme_st1d_vert : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
+ def int_aarch64_sme_st1q_vert : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
// Spill + fill
def int_aarch64_sme_ldr : DefaultAttrsIntrinsic<
@@ -2663,6 +2664,16 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sme_usmopa_wide : SME_OuterProduct_Intrinsic;
def int_aarch64_sme_usmops_wide : SME_OuterProduct_Intrinsic;
+ class SME_AddVectorToTile_Intrinsic
+ : DefaultAttrsIntrinsic<[],
+ [llvm_i64_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyvector_ty]>;
+
+ def int_aarch64_sme_addha : SME_AddVectorToTile_Intrinsic;
+ def int_aarch64_sme_addva : SME_AddVectorToTile_Intrinsic;
+
//
// Counting elements
//
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index c2dcfc254568..e81224d9b890 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1338,7 +1338,7 @@ def int_amdgcn_exp : Intrinsic <[], [
LLVMMatchType<0>, // src2
LLVMMatchType<0>, // src3
llvm_i1_ty, // done
- llvm_i1_ty // vm
+ llvm_i1_ty // vm (ignored on GFX11+)
],
[ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
ImmArg<ArgIndex<7>>, IntrWriteMem, IntrInaccessibleMemOnly,
diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td
index 4a21cf1eb7fc..57c47a15bd70 100644
--- a/llvm/include/llvm/IR/IntrinsicsDirectX.td
+++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td
@@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
-let TargetPrefix = "dxil" in {
+let TargetPrefix = "dx" in {
-def int_dxil_thread_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
-def int_dxil_group_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
-def int_dxil_thread_id_in_group : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
-def int_dxil_flattened_thread_id_in_group : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrWillReturn]>;
+def int_dx_thread_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
+def int_dx_group_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
+def int_dx_thread_id_in_group : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
+def int_dx_flattened_thread_id_in_group : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrWillReturn]>;
}
diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td
index 0930abcc0993..c274e3504250 100644
--- a/llvm/include/llvm/IR/IntrinsicsX86.td
+++ b/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -72,6 +72,12 @@ let TargetPrefix = "x86" in {
[ImmArg<ArgIndex<1>>]>;
}
+// Read Processor Register.
+let TargetPrefix = "x86" in {
+ def int_x86_rdpru : ClangBuiltin<"__builtin_ia32_rdpru">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
+}
+
//===----------------------------------------------------------------------===//
// CET SS
let TargetPrefix = "x86" in {
diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index be359d94f812..ec769ce95160 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -951,7 +951,9 @@ class MDNode : public Metadata {
/// The operands are in turn located immediately before the header.
/// For resizable MDNodes, the space for the storage vector is also allocated
/// immediately before the header, overlapping with the operands.
- struct Header {
+ /// Explicitly set alignment because bitfields by default have an
+ /// alignment of 1 on z/OS.
+ struct alignas(alignof(size_t)) Header {
bool IsResizable : 1;
bool IsLarge : 1;
size_t SmallSize : 4;
@@ -997,7 +999,13 @@ class MDNode : public Metadata {
alignTo(getAllocSize(), alignof(uint64_t));
}
- void *getLargePtr() const;
+ void *getLargePtr() const {
+ static_assert(alignof(LargeStorageVector) <= alignof(Header),
+ "LargeStorageVector too strongly aligned");
+ return reinterpret_cast<char *>(const_cast<Header *>(this)) -
+ sizeof(LargeStorageVector);
+ }
+
void *getSmallPtr();
LargeStorageVector &getLarge() {
@@ -1030,6 +1038,12 @@ class MDNode : public Metadata {
return makeArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
SmallNumOps);
}
+
+ unsigned getNumOperands() const {
+ if (!IsLarge)
+ return SmallNumOps;
+ return getLarge().size();
+ }
};
Header &getHeader() { return *(reinterpret_cast<Header *>(this) - 1); }
@@ -1281,7 +1295,7 @@ public:
}
/// Return number of MDNode operands.
- unsigned getNumOperands() const { return getHeader().operands().size(); }
+ unsigned getNumOperands() const { return getHeader().getNumOperands(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Metadata *MD) {
diff --git a/llvm/include/llvm/IR/Module.h b/llvm/include/llvm/IR/Module.h
index fc2d60947118..24da08d70b72 100644
--- a/llvm/include/llvm/IR/Module.h
+++ b/llvm/include/llvm/IR/Module.h
@@ -911,6 +911,10 @@ public:
StringRef getStackProtectorGuardReg() const;
void setStackProtectorGuardReg(StringRef Reg);
+ /// Get/set a symbol to use as the stack protector guard.
+ StringRef getStackProtectorGuardSymbol() const;
+ void setStackProtectorGuardSymbol(StringRef Symbol);
+
/// Get/set what offset from the stack protector to use.
int getStackProtectorGuardOffset() const;
void setStackProtectorGuardOffset(int Offset);
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index b76bc879fb45..f1dd29926278 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -1153,8 +1153,8 @@ private:
// Used in cases where we want to record the name of a global, but
// don't have the string owned elsewhere (e.g. the Strtab on a module).
- StringSaver Saver;
BumpPtrAllocator Alloc;
+ StringSaver Saver;
// The total number of basic blocks in the module in the per-module summary or
// the total number of basic blocks in the LTO unit in the combined index.
diff --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index 4e9f772dfdb6..56ccfc694c5f 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -65,6 +65,11 @@ public:
return nullptr;
}
+ Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
+ FastMathFlags FMF) const override {
+ return nullptr;
+ }
+
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
return nullptr;
}
@@ -103,19 +108,6 @@ public:
}
//===--------------------------------------------------------------------===//
- // Unary Operators
- //===--------------------------------------------------------------------===//
-
- Instruction *CreateFNeg(Constant *C) const override {
- return UnaryOperator::CreateFNeg(C);
- }
-
- Instruction *CreateUnOp(Instruction::UnaryOps Opc,
- Constant *C) const override {
- return UnaryOperator::Create(Opc, C);
- }
-
- //===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/OptBisect.h b/llvm/include/llvm/IR/OptBisect.h
index 63fd98073b51..14488bb1b37c 100644
--- a/llvm/include/llvm/IR/OptBisect.h
+++ b/llvm/include/llvm/IR/OptBisect.h
@@ -15,7 +15,6 @@
#define LLVM_IR_OPTBISECT_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/ManagedStatic.h"
#include <limits>
namespace llvm {
@@ -90,7 +89,8 @@ private:
/// Singleton instance of the OptBisect class, so multiple pass managers don't
/// need to coordinate their uses of OptBisect.
-extern ManagedStatic<OptBisect> OptBisector;
+OptBisect &getOptBisector();
+
} // end namespace llvm
#endif // LLVM_IR_OPTBISECT_H
diff --git a/llvm/include/llvm/LTO/Config.h b/llvm/include/llvm/LTO/Config.h
index 54bb82d84d96..b2ed8e60bd77 100644
--- a/llvm/include/llvm/LTO/Config.h
+++ b/llvm/include/llvm/LTO/Config.h
@@ -267,8 +267,12 @@ struct Config {
/// the given output file name, and (2) creates a resolution file whose name
/// is prefixed by the given output file name and sets ResolutionFile to its
/// file handle.
+ ///
+ /// SaveTempsArgs can be specified to select which temps to save.
+ /// If SaveTempsArgs is not provided, all temps are saved.
Error addSaveTemps(std::string OutputFileName,
- bool UseInputModulePath = false);
+ bool UseInputModulePath = false,
+ const DenseSet<StringRef> &SaveTempsArgs = {});
};
struct LTOLLVMDiagnosticHandler : public DiagnosticHandler {
diff --git a/llvm/include/llvm/MC/MCDwarf.h b/llvm/include/llvm/MC/MCDwarf.h
index ce65b173b3d2..8b2ae84749b4 100644
--- a/llvm/include/llvm/MC/MCDwarf.h
+++ b/llvm/include/llvm/MC/MCDwarf.h
@@ -374,8 +374,8 @@ public:
Header.RootFile.DirIndex = 0;
Header.RootFile.Checksum = Checksum;
Header.RootFile.Source = Source;
- Header.trackMD5Usage(Checksum.hasValue());
- Header.HasSource = Source.hasValue();
+ Header.trackMD5Usage(Checksum.has_value());
+ Header.HasSource = Source.has_value();
}
void resetFileTable() { Header.resetFileTable(); }
diff --git a/llvm/include/llvm/MC/MCSymbolWasm.h b/llvm/include/llvm/MC/MCSymbolWasm.h
index 5eab32cb5c12..33ec0db8fcab 100644
--- a/llvm/include/llvm/MC/MCSymbolWasm.h
+++ b/llvm/include/llvm/MC/MCSymbolWasm.h
@@ -89,7 +89,7 @@ public:
bool hasImportModule() const { return ImportModule.has_value(); }
StringRef getImportModule() const {
if (ImportModule)
- return ImportModule.getValue();
+ return ImportModule.value();
// Use a default module name of "env" for now, for compatibility with
// existing tools.
// TODO(sbc): Find a way to specify a default value in the object format
@@ -101,13 +101,13 @@ public:
bool hasImportName() const { return ImportName.has_value(); }
StringRef getImportName() const {
if (ImportName)
- return ImportName.getValue();
+ return ImportName.value();
return getName();
}
void setImportName(StringRef Name) { ImportName = Name; }
bool hasExportName() const { return ExportName.has_value(); }
- StringRef getExportName() const { return ExportName.getValue(); }
+ StringRef getExportName() const { return ExportName.value(); }
void setExportName(StringRef Name) { ExportName = Name; }
bool isFunctionTable() const {
@@ -130,14 +130,14 @@ public:
const wasm::WasmGlobalType &getGlobalType() const {
assert(GlobalType);
- return GlobalType.getValue();
+ return GlobalType.value();
}
void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; }
bool hasTableType() const { return TableType.has_value(); }
const wasm::WasmTableType &getTableType() const {
assert(hasTableType());
- return TableType.getValue();
+ return TableType.value();
}
void setTableType(wasm::WasmTableType TT) { TableType = TT; }
void setTableType(wasm::ValType VT) {
diff --git a/llvm/include/llvm/MC/MCSymbolXCOFF.h b/llvm/include/llvm/MC/MCSymbolXCOFF.h
index 2ec265e66300..cc19f882e6ad 100644
--- a/llvm/include/llvm/MC/MCSymbolXCOFF.h
+++ b/llvm/include/llvm/MC/MCSymbolXCOFF.h
@@ -40,7 +40,7 @@ public:
XCOFF::StorageClass getStorageClass() const {
assert(StorageClass && "StorageClass not set on XCOFF MCSymbol.");
- return StorageClass.getValue();
+ return StorageClass.value();
}
StringRef getUnqualifiedName() const { return getUnqualifiedName(getName()); }
diff --git a/llvm/include/llvm/ObjCopy/CommonConfig.h b/llvm/include/llvm/ObjCopy/CommonConfig.h
index 24503caed342..4921f5281ca6 100644
--- a/llvm/include/llvm/ObjCopy/CommonConfig.h
+++ b/llvm/include/llvm/ObjCopy/CommonConfig.h
@@ -241,6 +241,7 @@ struct CommonConfig {
StringMap<SectionRename> SectionsToRename;
StringMap<uint64_t> SetSectionAlignment;
StringMap<SectionFlagsUpdate> SetSectionFlags;
+ StringMap<uint64_t> SetSectionType;
StringMap<StringRef> SymbolsToRename;
// Symbol info specified by --add-symbol option.
diff --git a/llvm/include/llvm/Object/Decompressor.h b/llvm/include/llvm/Object/Decompressor.h
index e04ee3c3e4c0..00b6c2016742 100644
--- a/llvm/include/llvm/Object/Decompressor.h
+++ b/llvm/include/llvm/Object/Decompressor.h
@@ -33,12 +33,12 @@ public:
/// @param Out Destination buffer.
template <class T> Error resizeAndDecompress(T &Out) {
Out.resize(DecompressedSize);
- return decompress({Out.data(), (size_t)DecompressedSize});
+ return decompress({(uint8_t *)Out.data(), (size_t)DecompressedSize});
}
/// Uncompress section data to raw buffer provided.
/// @param Buffer Destination buffer.
- Error decompress(MutableArrayRef<char> Buffer);
+ Error decompress(MutableArrayRef<uint8_t> Buffer);
/// Return memory buffer size required for decompression.
uint64_t getDecompressedSize() { return DecompressedSize; }
diff --git a/llvm/include/llvm/Support/ARMTargetParser.def b/llvm/include/llvm/Support/ARMTargetParser.def
index 6a1ac7213dad..645c3b8963f5 100644
--- a/llvm/include/llvm/Support/ARMTargetParser.def
+++ b/llvm/include/llvm/Support/ARMTargetParser.def
@@ -303,6 +303,9 @@ ARM_CPU_NAME("cortex-m33", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m35p", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m55", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
(ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16))
+ARM_CPU_NAME("cortex-m85", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
+ (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16 |
+ ARM::AEK_RAS | ARM::AEK_PACBTI))
ARM_CPU_NAME("cortex-a32", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
diff --git a/llvm/include/llvm/Support/Allocator.h b/llvm/include/llvm/Support/Allocator.h
index 5ca0c9decac3..041729fa6594 100644
--- a/llvm/include/llvm/Support/Allocator.h
+++ b/llvm/include/llvm/Support/Allocator.h
@@ -63,7 +63,9 @@ template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
class BumpPtrAllocatorImpl
: public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
SizeThreshold, GrowthDelay>>,
- private AllocatorT {
+ private detail::AllocatorHolder<AllocatorT> {
+ using AllocTy = detail::AllocatorHolder<AllocatorT>;
+
public:
static_assert(SizeThreshold <= SlabSize,
"The SizeThreshold must be at most the SlabSize to ensure "
@@ -77,12 +79,12 @@ public:
template <typename T>
BumpPtrAllocatorImpl(T &&Allocator)
- : AllocatorT(std::forward<T &&>(Allocator)) {}
+ : AllocTy(std::forward<T &&>(Allocator)) {}
// Manually implement a move constructor as we must clear the old allocator's
// slabs as a matter of correctness.
BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
- : AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
+ : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
End(Old.End), Slabs(std::move(Old.Slabs)),
CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
@@ -107,7 +109,7 @@ public:
RedZoneSize = RHS.RedZoneSize;
Slabs = std::move(RHS.Slabs);
CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
- AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
+ AllocTy::operator=(std::move(RHS.getAllocator()));
RHS.CurPtr = RHS.End = nullptr;
RHS.BytesAllocated = 0;
@@ -175,7 +177,7 @@ public:
size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
if (PaddedSize > SizeThreshold) {
void *NewSlab =
- AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
+ this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
// We own the new slab and don't want anyone reading anyting other than
// pieces returned from this method. So poison the whole slab.
__asan_poison_memory_region(NewSlab, PaddedSize);
@@ -334,8 +336,8 @@ private:
void StartNewSlab() {
size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
- void *NewSlab =
- AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
+ void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
+ alignof(std::max_align_t));
// We own the new slab and don't want anyone reading anything other than
// pieces returned from this method. So poison the whole slab.
__asan_poison_memory_region(NewSlab, AllocatedSlabSize);
@@ -351,7 +353,8 @@ private:
for (; I != E; ++I) {
size_t AllocatedSlabSize =
computeSlabSize(std::distance(Slabs.begin(), I));
- AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
+ this->getAllocator().Deallocate(*I, AllocatedSlabSize,
+ alignof(std::max_align_t));
}
}
@@ -360,7 +363,7 @@ private:
for (auto &PtrAndSize : CustomSizedSlabs) {
void *Ptr = PtrAndSize.first;
size_t Size = PtrAndSize.second;
- AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
+ this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
}
}
diff --git a/llvm/include/llvm/Support/AllocatorBase.h b/llvm/include/llvm/Support/AllocatorBase.h
index eccced1d1ff4..5d05d3f8777b 100644
--- a/llvm/include/llvm/Support/AllocatorBase.h
+++ b/llvm/include/llvm/Support/AllocatorBase.h
@@ -99,6 +99,28 @@ public:
void PrintStats() const {}
};
+namespace detail {
+
+template <typename Alloc> class AllocatorHolder : Alloc {
+public:
+ AllocatorHolder() = default;
+ AllocatorHolder(const Alloc &A) : Alloc(A) {}
+ AllocatorHolder(Alloc &&A) : Alloc(static_cast<Alloc &&>(A)) {}
+ Alloc &getAllocator() { return *this; }
+ const Alloc &getAllocator() const { return *this; }
+};
+
+template <typename Alloc> class AllocatorHolder<Alloc &> {
+ Alloc &A;
+
+public:
+ AllocatorHolder(Alloc &A) : A(A) {}
+ Alloc &getAllocator() { return A; }
+ const Alloc &getAllocator() const { return A; }
+};
+
+} // namespace detail
+
} // namespace llvm
#endif // LLVM_SUPPORT_ALLOCATORBASE_H
diff --git a/llvm/include/llvm/Support/Casting.h b/llvm/include/llvm/Support/Casting.h
index 894c1f439b64..5444d777b749 100644
--- a/llvm/include/llvm/Support/Casting.h
+++ b/llvm/include/llvm/Support/Casting.h
@@ -638,9 +638,7 @@ template <typename T, typename Enable = void> struct ValueIsPresent {
template <typename T> struct ValueIsPresent<Optional<T>> {
using UnwrappedType = T;
static inline bool isPresent(const Optional<T> &t) { return t.has_value(); }
- static inline decltype(auto) unwrapValue(Optional<T> &t) {
- return t.getValue();
- }
+ static inline decltype(auto) unwrapValue(Optional<T> &t) { return t.value(); }
};
// If something is "nullable" then we just compare it to nullptr to see if it
diff --git a/llvm/include/llvm/Support/CodeGen.h b/llvm/include/llvm/Support/CodeGen.h
index 71d0ddbfe05e..425d3a3d95d4 100644
--- a/llvm/include/llvm/Support/CodeGen.h
+++ b/llvm/include/llvm/Support/CodeGen.h
@@ -103,6 +103,13 @@ namespace llvm {
Async = 2, ///< "Asynchronous" unwind tables (instr precise)
Default = 2,
};
+
+ enum class FunctionReturnThunksKind : unsigned int {
+ Keep = 0, ///< No function return thunk.
+ Extern = 1, ///< Replace returns with jump to thunk, don't emit thunk.
+ Invalid = 2, ///< Not used.
+ };
+
} // namespace llvm
#endif
diff --git a/llvm/include/llvm/Support/Compression.h b/llvm/include/llvm/Support/Compression.h
index e6f898229412..c99f811459ab 100644
--- a/llvm/include/llvm/Support/Compression.h
+++ b/llvm/include/llvm/Support/Compression.h
@@ -13,6 +13,7 @@
#ifndef LLVM_SUPPORT_COMPRESSION_H
#define LLVM_SUPPORT_COMPRESSION_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -20,28 +21,30 @@ template <typename T> class SmallVectorImpl;
class Error;
class StringRef;
+namespace compression {
namespace zlib {
-static constexpr int NoCompression = 0;
-static constexpr int BestSpeedCompression = 1;
-static constexpr int DefaultCompression = 6;
-static constexpr int BestSizeCompression = 9;
+constexpr int NoCompression = 0;
+constexpr int BestSpeedCompression = 1;
+constexpr int DefaultCompression = 6;
+constexpr int BestSizeCompression = 9;
bool isAvailable();
-void compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
+void compress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &CompressedBuffer,
int Level = DefaultCompression);
-Error uncompress(StringRef InputBuffer, char *UncompressedBuffer,
+Error uncompress(ArrayRef<uint8_t> Input, uint8_t *UncompressedBuffer,
size_t &UncompressedSize);
-Error uncompress(StringRef InputBuffer,
- SmallVectorImpl<char> &UncompressedBuffer,
+Error uncompress(ArrayRef<uint8_t> Input,
+ SmallVectorImpl<uint8_t> &UncompressedBuffer,
size_t UncompressedSize);
-uint32_t crc32(StringRef Buffer);
+} // End of namespace zlib
-} // End of namespace zlib
+} // End of namespace compression
} // End of namespace llvm
diff --git a/llvm/include/llvm/Support/ConvertUTF.h b/llvm/include/llvm/Support/ConvertUTF.h
index 662f3aca5b54..1e05cfe1f424 100644
--- a/llvm/include/llvm/Support/ConvertUTF.h
+++ b/llvm/include/llvm/Support/ConvertUTF.h
@@ -181,6 +181,8 @@ Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
+unsigned getUTF8SequenceSize(const UTF8 *source, const UTF8 *sourceEnd);
+
unsigned getNumBytesForUTF8(UTF8 firstByte);
/*************************************************************************/
diff --git a/llvm/include/llvm/Support/Error.h b/llvm/include/llvm/Support/Error.h
index 1a801b6f2c7a..3c2c2c8b8ceb 100644
--- a/llvm/include/llvm/Support/Error.h
+++ b/llvm/include/llvm/Support/Error.h
@@ -1270,7 +1270,7 @@ public:
assert(Err && "Trying to log after takeError().");
OS << "'" << FileName << "': ";
if (Line)
- OS << "line " << Line.getValue() << ": ";
+ OS << "line " << Line.value() << ": ";
Err->log(OS);
}
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 8df7ced0029d..5d6be0fe655e 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -384,6 +384,8 @@ HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FADD)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FSUB)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMAX)
+HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMIN)
// Generic atomic fence
HANDLE_TARGET_OPCODE(G_FENCE)
diff --git a/llvm/include/llvm/Support/X86TargetParser.def b/llvm/include/llvm/Support/X86TargetParser.def
index 58fa3b3842e7..1a928e5a9acc 100644
--- a/llvm/include/llvm/Support/X86TargetParser.def
+++ b/llvm/include/llvm/Support/X86TargetParser.def
@@ -178,6 +178,7 @@ X86_FEATURE (PREFETCHWT1, "prefetchwt1")
X86_FEATURE (PRFCHW, "prfchw")
X86_FEATURE (PTWRITE, "ptwrite")
X86_FEATURE (RDPID, "rdpid")
+X86_FEATURE (RDPRU, "rdpru")
X86_FEATURE (RDRND, "rdrnd")
X86_FEATURE (RDSEED, "rdseed")
X86_FEATURE (RTM, "rtm")
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 3e2f18b57d1e..5652e60d081c 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1126,6 +1126,8 @@ def G_ATOMICRMW_UMAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_UMIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_FADD : G_ATOMICRMW_OP;
def G_ATOMICRMW_FSUB : G_ATOMICRMW_OP;
+def G_ATOMICRMW_FMAX : G_ATOMICRMW_OP;
+def G_ATOMICRMW_FMIN : G_ATOMICRMW_OP;
def G_FENCE : GenericInstruction {
let OutOperandList = (outs);
diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 12eee24b578f..ef4fc85b245d 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -184,6 +184,8 @@ def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = true; }
def : GINodeEquiv<G_LOAD, atomic_load> {
let CheckMMOIsNonAtomic = false;
let CheckMMOIsAtomic = true;
+ let IfSignExtend = G_SEXTLOAD;
+ let IfZeroExtend = G_ZEXTLOAD;
}
// Operands are swapped for atomic_store vs. regular store
@@ -206,6 +208,8 @@ def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd>;
def : GINodeEquiv<G_ATOMICRMW_FSUB, atomic_load_fsub>;
+def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax>;
+def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin>;
def : GINodeEquiv<G_FENCE, atomic_fence>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 47b686aca7b5..171fdb1b98e0 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -651,6 +651,10 @@ def atomic_load_fadd : SDNode<"ISD::ATOMIC_LOAD_FADD" , SDTFPAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_fsub : SDNode<"ISD::ATOMIC_LOAD_FSUB" , SDTFPAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fmax : SDNode<"ISD::ATOMIC_LOAD_FMAX", SDTFPAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fmin : SDNode<"ISD::ATOMIC_LOAD_FMIN", SDTFPAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
@@ -805,6 +809,10 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
// They will be tested prior to the code in pred and must not be used in
// ImmLeaf and its subclasses.
+ // If set to true, a predicate is added that checks for the absence of use of
+ // the first result.
+ bit HasNoUse = ?;
+
// Is the desired pre-packaged predicate for a load?
bit IsLoad = ?;
// Is the desired pre-packaged predicate for a store?
@@ -1619,18 +1627,34 @@ defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
defm atomic_store : binary_atomic_op<atomic_store>;
defm atomic_cmp_swap : ternary_atomic_op<atomic_cmp_swap>;
+/// Atomic load which zeroes the excess high bits.
+def atomic_load_zext :
+ PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let IsZeroExtLoad = true;
+}
+
+/// Atomic load which sign extends the excess high bits.
+def atomic_load_sext :
+ PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let IsSignExtLoad = true;
+}
+
def atomic_load_8 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr)> {
let IsAtomic = true;
let MemoryVT = i8;
}
+
def atomic_load_16 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr)> {
let IsAtomic = true;
let MemoryVT = i16;
}
+
def atomic_load_32 :
PatFrag<(ops node:$ptr),
(atomic_load node:$ptr)> {
@@ -1644,6 +1668,40 @@ def atomic_load_64 :
let MemoryVT = i64;
}
+def atomic_load_zext_8 :
+ PatFrag<(ops node:$ptr), (atomic_load_zext node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let MemoryVT = i8;
+}
+
+def atomic_load_zext_16 :
+ PatFrag<(ops node:$ptr), (atomic_load_zext node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let MemoryVT = i16;
+}
+
+def atomic_load_sext_8 :
+ PatFrag<(ops node:$ptr), (atomic_load_sext node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let MemoryVT = i8;
+}
+
+def atomic_load_sext_16 :
+ PatFrag<(ops node:$ptr), (atomic_load_sext node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let MemoryVT = i16;
+}
+
+// Atomic load which zeroes or anyextends the high bits.
+def atomic_load_az_8 : PatFrags<(ops node:$op),
+ [(atomic_load_8 node:$op),
+ (atomic_load_zext_8 node:$op)]>;
+
+// Atomic load which zeroes or anyextends the high bits.
+def atomic_load_az_16 : PatFrags<(ops node:$op),
+ [(atomic_load_16 node:$op),
+ (atomic_load_zext_16 node:$op)]>;
+
def nonext_masked_gather :
PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
(masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
diff --git a/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 2676f2705424..c8ea1f5b6624 100644
--- a/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -211,8 +211,6 @@ private:
void addExtensionsToPM(ExtensionPointTy ETy,
legacy::PassManagerBase &PM) const;
void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
- void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
- void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
void addVectorPasses(legacy::PassManagerBase &PM, bool IsFullLTO);
diff --git a/llvm/include/llvm/Transforms/Utils/Debugify.h b/llvm/include/llvm/Transforms/Utils/Debugify.h
index 405bbb8e0be8..24b9eeab6ee4 100644
--- a/llvm/include/llvm/Transforms/Utils/Debugify.h
+++ b/llvm/include/llvm/Transforms/Utils/Debugify.h
@@ -101,7 +101,18 @@ llvm::FunctionPass *createDebugifyFunctionPass(
llvm::StringRef NameOfWrappedPass = "",
DebugInfoPerPass *DebugInfoBeforePass = nullptr);
-struct NewPMDebugifyPass : public llvm::PassInfoMixin<NewPMDebugifyPass> {
+class NewPMDebugifyPass : public llvm::PassInfoMixin<NewPMDebugifyPass> {
+ llvm::StringRef NameOfWrappedPass;
+ DebugInfoPerPass *DebugInfoBeforePass = nullptr;
+ enum DebugifyMode Mode = DebugifyMode::NoDebugify;
+public:
+ NewPMDebugifyPass(
+ enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
+ llvm::StringRef NameOfWrappedPass = "",
+ DebugInfoPerPass *DebugInfoBeforePass = nullptr)
+ : NameOfWrappedPass(NameOfWrappedPass),
+ DebugInfoBeforePass(DebugInfoBeforePass), Mode(Mode) {}
+
llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
};
@@ -148,18 +159,65 @@ llvm::FunctionPass *createCheckDebugifyFunctionPass(
DebugInfoPerPass *DebugInfoBeforePass = nullptr,
llvm::StringRef OrigDIVerifyBugsReportFilePath = "");
-struct NewPMCheckDebugifyPass
+class NewPMCheckDebugifyPass
: public llvm::PassInfoMixin<NewPMCheckDebugifyPass> {
+ llvm::StringRef NameOfWrappedPass;
+ llvm::StringRef OrigDIVerifyBugsReportFilePath;
+ DebugifyStatsMap *StatsMap;
+ DebugInfoPerPass *DebugInfoBeforePass;
+ enum DebugifyMode Mode;
+ bool Strip;
+public:
+ NewPMCheckDebugifyPass(
+ bool Strip = false, llvm::StringRef NameOfWrappedPass = "",
+ DebugifyStatsMap *StatsMap = nullptr,
+ enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
+ DebugInfoPerPass *DebugInfoBeforePass = nullptr,
+ llvm::StringRef OrigDIVerifyBugsReportFilePath = "")
+ : NameOfWrappedPass(NameOfWrappedPass),
+ OrigDIVerifyBugsReportFilePath(OrigDIVerifyBugsReportFilePath),
+ StatsMap(StatsMap), DebugInfoBeforePass(DebugInfoBeforePass), Mode(Mode),
+ Strip(Strip) {}
+
llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
};
namespace llvm {
void exportDebugifyStats(StringRef Path, const DebugifyStatsMap &Map);
-struct DebugifyEachInstrumentation {
- DebugifyStatsMap StatsMap;
+class DebugifyEachInstrumentation {
+ llvm::StringRef OrigDIVerifyBugsReportFilePath = "";
+ DebugInfoPerPass *DebugInfoBeforePass = nullptr;
+ enum DebugifyMode Mode = DebugifyMode::NoDebugify;
+ DebugifyStatsMap *DIStatsMap = nullptr;
+
+public:
void registerCallbacks(PassInstrumentationCallbacks &PIC);
+ // Used within DebugifyMode::SyntheticDebugInfo mode.
+ void setDIStatsMap(DebugifyStatsMap &StatMap) { DIStatsMap = &StatMap; }
+ const DebugifyStatsMap &getDebugifyStatsMap() const { return *DIStatsMap; }
+ // Used within DebugifyMode::OriginalDebugInfo mode.
+ void setDebugInfoBeforePass(DebugInfoPerPass &PerPassMap) {
+ DebugInfoBeforePass = &PerPassMap;
+ }
+ DebugInfoPerPass &getDebugInfoPerPass() { return *DebugInfoBeforePass; }
+
+ void setOrigDIVerifyBugsReportFilePath(StringRef BugsReportFilePath) {
+ OrigDIVerifyBugsReportFilePath = BugsReportFilePath;
+ }
+ StringRef getOrigDIVerifyBugsReportFilePath() const {
+ return OrigDIVerifyBugsReportFilePath;
+ }
+
+ void setDebugifyMode(enum DebugifyMode M) { Mode = M; }
+
+ bool isSyntheticDebugInfo() const {
+ return Mode == DebugifyMode::SyntheticDebugInfo;
+ }
+ bool isOriginalDebugInfoMode() const {
+ return Mode == DebugifyMode::OriginalDebugInfo;
+ }
};
/// DebugifyCustomPassManager wraps each pass with the debugify passes if
diff --git a/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
index 676c0c1487db..adb39a410b55 100644
--- a/llvm/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -435,7 +435,13 @@ bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
bool Signed);
-enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, NoHardUse, AlwaysRepl };
+enum ReplaceExitVal {
+ NeverRepl,
+ OnlyCheapRepl,
+ NoHardUse,
+ UnusedIndVarInLoop,
+ AlwaysRepl
+};
/// If the final value of any expressions that are recurrent in the loop can
/// be computed, substitute the exit values from the loop into any instructions
diff --git a/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
index 85263fc00bc3..335cf7acc2f7 100644
--- a/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -109,7 +109,8 @@ void filterDeadComdatFunctions(
std::string getUniqueModuleId(Module *M);
/// Embed the memory buffer \p Buf into the module \p M as a global using the
-/// specified section name.
+/// specified section name. Also provide a metadata entry to identify it in the
+/// module using the same section name.
void embedBufferInModule(Module &M, MemoryBufferRef Buf, StringRef SectionName,
Align Alignment = Align(1));
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index b01bd222b252..2d69c2f86642 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -219,16 +219,9 @@ public:
ExactFPMathInst = I;
}
- void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
-
Instruction *getExactFPInst() { return ExactFPMathInst; }
- unsigned getNumRuntimePointerChecks() const {
- return NumRuntimePointerChecks;
- }
-
private:
- unsigned NumRuntimePointerChecks = 0;
Instruction *ExactFPMathInst = nullptr;
};