Diffstat (limited to 'include')
-rw-r--r--  include/llvm-c/OrcBindings.h | 28
-rw-r--r--  include/llvm/ADT/APInt.h | 6
-rw-r--r--  include/llvm/ADT/STLExtras.h | 4
-rw-r--r--  include/llvm/ADT/SmallPtrSet.h | 11
-rw-r--r--  include/llvm/Analysis/BlockFrequencyInfoImpl.h | 2
-rw-r--r--  include/llvm/Analysis/CGSCCPassManager.h | 17
-rw-r--r--  include/llvm/Analysis/InlineCost.h | 2
-rw-r--r--  include/llvm/Analysis/LazyCallGraph.h | 18
-rw-r--r--  include/llvm/Analysis/MemoryBuiltins.h | 3
-rw-r--r--  include/llvm/Analysis/RegionInfoImpl.h | 8
-rw-r--r--  include/llvm/Analysis/TargetTransformInfo.h | 41
-rw-r--r--  include/llvm/Analysis/TargetTransformInfoImpl.h | 14
-rw-r--r--  include/llvm/Analysis/ValueTracking.h | 3
-rw-r--r--  include/llvm/BinaryFormat/Wasm.h | 4
-rw-r--r--  include/llvm/Bitcode/LLVMBitCodes.h | 12
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h | 4
-rw-r--r--  include/llvm/CodeGen/BasicTTIImpl.h | 2
-rw-r--r--  include/llvm/CodeGen/GlobalISel/InstructionSelector.h | 158
-rw-r--r--  include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h | 337
-rw-r--r--  include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 8
-rw-r--r--  include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 53
-rw-r--r--  include/llvm/CodeGen/LiveRegUnits.h | 10
-rw-r--r--  include/llvm/CodeGen/MachineFunction.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineMemOperand.h | 15
-rw-r--r--  include/llvm/CodeGen/RuntimeLibcalls.h | 23
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h | 8
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h | 4
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h | 8
-rw-r--r--  include/llvm/DebugInfo/CodeView/SymbolRecord.h | 4
-rw-r--r--  include/llvm/DebugInfo/CodeView/TypeIndex.h | 21
-rw-r--r--  include/llvm/DebugInfo/DIContext.h | 24
-rw-r--r--  include/llvm/DebugInfo/DWARF/DWARFContext.h | 25
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h | 6
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/DbiStream.h | 2
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h | 12
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h | 2
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h | 49
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h | 2
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h | 2
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h | 6
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/NativeSession.h | 7
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h | 4
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/PDBStringTable.h | 1
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/PublicsStream.h | 4
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h | 54
-rw-r--r--  include/llvm/DebugInfo/PDB/Native/RawTypes.h | 13
-rw-r--r--  include/llvm/ExecutionEngine/JITSymbol.h | 88
-rw-r--r--  include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h | 177
-rw-r--r--  include/llvm/ExecutionEngine/Orc/ExecutionUtils.h | 25
-rw-r--r--  include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h | 36
-rw-r--r--  include/llvm/ExecutionEngine/Orc/IRCompileLayer.h | 18
-rw-r--r--  include/llvm/ExecutionEngine/Orc/IRTransformLayer.h | 16
-rw-r--r--  include/llvm/ExecutionEngine/Orc/LambdaResolver.h | 2
-rw-r--r--  include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h | 88
-rw-r--r--  include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h | 17
-rw-r--r--  include/llvm/ExecutionEngine/Orc/OrcError.h | 15
-rw-r--r--  include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h | 31
-rw-r--r--  include/llvm/ExecutionEngine/RuntimeDyld.h | 15
-rw-r--r--  include/llvm/IR/Constants.h | 10
-rw-r--r--  include/llvm/IR/IRBuilder.h | 20
-rw-r--r--  include/llvm/IR/Instructions.h | 210
-rw-r--r--  include/llvm/IR/IntrinsicInst.h | 169
-rw-r--r--  include/llvm/IR/Intrinsics.td | 16
-rw-r--r--  include/llvm/IR/LLVMContext.h | 28
-rw-r--r--  include/llvm/IR/Module.h | 2
-rw-r--r--  include/llvm/IR/ModuleSummaryIndex.h | 10
-rw-r--r--  include/llvm/IR/PassManager.h | 31
-rw-r--r--  include/llvm/IR/PatternMatch.h | 73
-rw-r--r--  include/llvm/IR/SafepointIRVerifier.h | 35
-rw-r--r--  include/llvm/IR/Type.h | 6
-rw-r--r--  include/llvm/InitializePasses.h | 2
-rw-r--r--  include/llvm/MC/MCAsmBackend.h | 2
-rw-r--r--  include/llvm/MC/MCDisassembler/MCDisassembler.h | 1
-rw-r--r--  include/llvm/MC/MCMachObjectWriter.h | 3
-rw-r--r--  include/llvm/MC/MCObjectWriter.h | 2
-rw-r--r--  include/llvm/MC/MCSymbolWasm.h | 14
-rw-r--r--  include/llvm/Object/COFF.h | 3
-rw-r--r--  include/llvm/Object/Wasm.h | 14
-rw-r--r--  include/llvm/ObjectYAML/WasmYAML.h | 3
-rw-r--r--  include/llvm/Option/OptTable.h | 3
-rw-r--r--  include/llvm/Passes/PassBuilder.h | 294
-rw-r--r--  include/llvm/ProfileData/InstrProf.h | 54
-rw-r--r--  include/llvm/ProfileData/InstrProfReader.h | 37
-rw-r--r--  include/llvm/ProfileData/InstrProfWriter.h | 16
-rw-r--r--  include/llvm/ProfileData/ProfileCommon.h | 3
-rw-r--r--  include/llvm/Support/BlockFrequency.h | 4
-rw-r--r--  include/llvm/Support/Compiler.h | 10
-rw-r--r--  include/llvm/Support/DynamicLibrary.h | 16
-rw-r--r--  include/llvm/Support/ErrorHandling.h | 48
-rw-r--r--  include/llvm/Support/GenericDomTreeConstruction.h | 169
-rw-r--r--  include/llvm/Support/ReverseIteration.h | 17
-rw-r--r--  include/llvm/Support/UnicodeCharRanges.h | 7
-rw-r--r--  include/llvm/Target/GlobalISel/SelectionDAGCompat.td | 1
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h | 10
-rw-r--r--  include/llvm/Target/TargetLowering.h | 30
-rw-r--r--  include/llvm/Transforms/Scalar/GVN.h | 30
-rw-r--r--  include/llvm/Transforms/Utils/LowerMemIntrinsics.h | 26
-rw-r--r--  include/llvm/Transforms/Utils/SSAUpdaterImpl.h | 10
-rw-r--r--  include/llvm/module.modulemap | 1
99 files changed, 2313 insertions, 698 deletions
diff --git a/include/llvm-c/OrcBindings.h b/include/llvm-c/OrcBindings.h
index d86ea8808889..7ee395431358 100644
--- a/include/llvm-c/OrcBindings.h
+++ b/include/llvm-c/OrcBindings.h
@@ -113,8 +113,9 @@ void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
/**
* Create a lazy compile callback.
*/
-LLVMOrcTargetAddress
+LLVMOrcErrorCode
LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
LLVMOrcLazyCompileCallbackFn Callback,
void *CallbackCtx);
@@ -135,8 +136,9 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
/**
* Add module to be eagerly compiled.
*/
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@@ -144,8 +146,9 @@ LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
* Add module to be lazily compiled one function at a time.
*/
-LLVMOrcModuleHandle
+LLVMOrcErrorCode
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@@ -153,10 +156,11 @@ LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
* Add an object file.
*/
-LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
- LLVMSharedObjectBufferRef Obj,
- LLVMOrcSymbolResolverFn SymbolResolver,
- void *SymbolResolverCtx);
+LLVMOrcErrorCode LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle *RetHandle,
+ LLVMSharedObjectBufferRef Obj,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
/**
* Remove a module set from the JIT.
@@ -164,18 +168,20 @@ LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
* This works for all modules that can be added via OrcAdd*, including object
* files.
*/
-void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H);
+LLVMOrcErrorCode LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
+ LLVMOrcModuleHandle H);
/**
* Get symbol address from JIT instance.
*/
-LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
- const char *SymbolName);
+LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ LLVMOrcTargetAddress *RetAddr,
+ const char *SymbolName);
/**
* Dispose of an ORC JIT stack.
*/
-void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
+LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
#ifdef __cplusplus
}
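A minimal sketch of driving the revised ORC C API above now that these entry points return LLVMOrcErrorCode and hand results back through out-parameters (Stack, Mod and Resolver are assumed to be set up elsewhere, and LLVMOrcErrSuccess is assumed to be the success value already defined in this header):

  #include "llvm-c/OrcBindings.h"

  static int addAndRun(LLVMOrcJITStackRef Stack, LLVMSharedModuleRef Mod,
                       LLVMOrcSymbolResolverFn Resolver) {
    LLVMOrcModuleHandle H;
    if (LLVMOrcAddEagerlyCompiledIR(Stack, &H, Mod, Resolver, nullptr) !=
        LLVMOrcErrSuccess)
      return 1;
    LLVMOrcTargetAddress MainAddr;
    if (LLVMOrcGetSymbolAddress(Stack, &MainAddr, "main") != LLVMOrcErrSuccess)
      return 1;
    // ... call through MainAddr here ...
    LLVMOrcRemoveModule(Stack, H);
    return LLVMOrcDisposeInstance(Stack) == LLVMOrcErrSuccess ? 0 : 1;
  }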
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index e5f0c35534ac..a1cce6e5fe17 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -401,7 +401,11 @@ public:
/// \brief Determine if this is a value of 1.
///
/// This checks to see if the value of this APInt is one.
- bool isOneValue() const { return getActiveBits() == 1; }
+ bool isOneValue() const {
+ if (isSingleWord())
+ return U.VAL == 1;
+ return countLeadingZerosSlowCase() == BitWidth - 1;
+ }
/// \brief Determine if this is the largest unsigned value.
///
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 8c28412bb607..83f289c42a23 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -100,6 +100,8 @@ class function_ref<Ret(Params...)> {
}
public:
+ function_ref() : callback(nullptr) {}
+
template <typename Callable>
function_ref(Callable &&callable,
typename std::enable_if<
@@ -110,6 +112,8 @@ public:
Ret operator()(Params ...params) const {
return callback(callable, std::forward<Params>(params)...);
}
+
+ operator bool() const { return callback; }
};
// deleter - Very very very simple method that is used to invoke operator
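With the default constructor and boolean conversion added above, a function_ref parameter can model an optional callback that is skipped when empty; a minimal sketch (forEachValue is hypothetical):

  #include "llvm/ADT/STLExtras.h"
  using namespace llvm;

  static void forEachValue(int *Begin, int *End,
                           function_ref<void(int)> Visitor = {}) {
    for (int *I = Begin; I != End; ++I)
      if (Visitor) // false for a default-constructed function_ref
        Visitor(*I);
  }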
diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h
index a2ad74b1e04a..4e8a2490ee3c 100644
--- a/include/llvm/ADT/SmallPtrSet.h
+++ b/include/llvm/ADT/SmallPtrSet.h
@@ -15,9 +15,9 @@
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
-#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
@@ -29,15 +29,6 @@
namespace llvm {
-#if LLVM_ENABLE_ABI_BREAKING_CHECKS
-template <class T = void> struct ReverseIterate { static bool value; };
-#if LLVM_ENABLE_REVERSE_ITERATION
-template <class T> bool ReverseIterate<T>::value = true;
-#else
-template <class T> bool ReverseIterate<T>::value = false;
-#endif
-#endif
-
/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
/// for small and one for large sets.
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 3e05e09900a5..5de3821242e0 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -1353,4 +1353,4 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
#undef DEBUG_TYPE
-#endif
+#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index a15a9e18c815..32868cbecdcf 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -577,12 +577,17 @@ public:
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
- // Update the call graph based on this function pass. This may also
- // update the current SCC to point to a smaller, more refined SCC.
- CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
- CG, *CurrentC, *N, AM, UR, DebugLogging);
- assert(CG.lookupSCC(*N) == CurrentC &&
- "Current SCC not updated to the SCC containing the current node!");
+ // If the call graph hasn't been preserved, update it based on this
+ // function pass. This may also update the current SCC to point to
+ // a smaller, more refined SCC.
+ auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
+ if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
+ CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
+ CG, *CurrentC, *N, AM, UR, DebugLogging);
+ assert(
+ CG.lookupSCC(*N) == CurrentC &&
+ "Current SCC not updated to the SCC containing the current node!");
+ }
}
// By definition we preserve the proxy. And we preserve all analyses on
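A minimal sketch of what the preservation check buys (the pass itself is hypothetical): a function pass that performs purely intraprocedural rewriting can mark the lazy call graph preserved, and the adaptor loop above then skips updateCGAndAnalysisManagerForFunctionPass entirely.

  #include "llvm/Analysis/LazyCallGraph.h"
  #include "llvm/IR/PassManager.h"
  using namespace llvm;

  struct LocalOnlyPass : PassInfoMixin<LocalOnlyPass> {
    PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
      // ... rewriting that neither adds nor removes call edges ...
      PreservedAnalyses PA;
      PA.preserve<LazyCallGraphAnalysis>(); // makes PAC.preserved() true above
      return PA;
    }
  };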
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index ce0b7895f253..f33a2de5a5f4 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -160,7 +160,7 @@ InlineParams getInlineParams(int Threshold);
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
-/// Return the cost associated with a callsite, including paramater passing
+/// Return the cost associated with a callsite, including parameter passing
/// and the call/return instruction.
int getCallsiteCost(CallSite CS, const DataLayout &DL);
diff --git a/include/llvm/Analysis/LazyCallGraph.h b/include/llvm/Analysis/LazyCallGraph.h
index ad7f5c80549f..3a052761ad7d 100644
--- a/include/llvm/Analysis/LazyCallGraph.h
+++ b/include/llvm/Analysis/LazyCallGraph.h
@@ -652,17 +652,23 @@ public:
/// Make an existing internal ref edge into a call edge.
///
/// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
- /// If that happens, the deleted SCC pointers are returned. These SCCs are
- /// not in a valid state any longer but the pointers will remain valid
- /// until destruction of the parent graph instance for the purpose of
- /// clearing cached information.
+ /// If that happens, the optional callback \p MergedCB will be invoked (if
+ /// provided) on the SCCs being merged away prior to actually performing
+ /// the merge. Note that this will never include the target SCC as that
+ /// will be the SCC functions are merged into to resolve the cycle. Once
+ /// this function returns, these merged SCCs are not in a valid state but
+ /// the pointers will remain valid until destruction of the parent graph
+ /// instance for the purpose of clearing cached information. This function
+ /// also returns 'true' if a cycle was formed and some SCCs merged away as
+ /// a convenience.
///
/// After this operation, both SourceN's SCC and TargetN's SCC may move
/// position within this RefSCC's postorder list. Any SCCs merged are
/// merged into the TargetN's SCC in order to preserve reachability analyses
/// which took place on that SCC.
- SmallVector<SCC *, 1> switchInternalEdgeToCall(Node &SourceN,
- Node &TargetN);
+ bool switchInternalEdgeToCall(
+ Node &SourceN, Node &TargetN,
+ function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
/// Make an existing internal call edge between separate SCCs into a ref
/// edge.
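A minimal sketch of the new callback-taking form (RC, SourceN and TargetN are an existing RefSCC and two of its nodes; the lambda body stands in for whatever per-SCC cache invalidation a caller needs):

  #include "llvm/Analysis/LazyCallGraph.h"
  using namespace llvm;

  static bool promoteRefToCall(LazyCallGraph::RefSCC &RC,
                               LazyCallGraph::Node &SourceN,
                               LazyCallGraph::Node &TargetN) {
    return RC.switchInternalEdgeToCall(
        SourceN, TargetN, [](ArrayRef<LazyCallGraph::SCC *> MergedSCCs) {
          for (LazyCallGraph::SCC *C : MergedSCCs)
            (void)C; // e.g. drop any caches keyed on C before it is merged away
        });
  }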
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 60dafccd84bd..23ab372703ee 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -224,6 +224,9 @@ public:
SizeOffsetType visitSelectInst(SelectInst &I);
SizeOffsetType visitUndefValue(UndefValue&);
SizeOffsetType visitInstruction(Instruction &I);
+
+private:
+ bool CheckedZextOrTrunc(APInt &I);
};
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
diff --git a/include/llvm/Analysis/RegionInfoImpl.h b/include/llvm/Analysis/RegionInfoImpl.h
index c0337b6daf37..cd4ec0a03a9e 100644
--- a/include/llvm/Analysis/RegionInfoImpl.h
+++ b/include/llvm/Analysis/RegionInfoImpl.h
@@ -34,10 +34,10 @@
#include <type_traits>
#include <vector>
-namespace llvm {
-
#define DEBUG_TYPE "region"
+namespace llvm {
+
//===----------------------------------------------------------------------===//
/// RegionBase Implementation
template <class Tr>
@@ -901,8 +901,8 @@ void RegionInfoBase<Tr>::calculate(FuncT &F) {
buildRegionsTree(DT->getNode(BB), TopLevelRegion);
}
-#undef DEBUG_TYPE
-
} // end namespace llvm
+#undef DEBUG_TYPE
+
#endif // LLVM_ANALYSIS_REGIONINFOIMPL_H
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 68fbf640994c..dfb525e3de7a 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -753,6 +753,28 @@ public:
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) const;
+ /// \returns The type to use in a loop expansion of a memcpy call.
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign, unsigned DestAlign) const;
+
+ /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
+ /// \param RemainingBytes The number of bytes to copy.
+ ///
+ /// Calculates the operand types to use when copying \p RemainingBytes of
+ /// memory, where source and destination alignments are \p SrcAlign and
+ /// \p DestAlign respectively.
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const;
+
+ /// \returns True if we want to test the new memcpy lowering functionality in
+ /// Transform/Utils.
+ /// Temporary. Will be removed once we move to the new functionality and
+ /// remove the old.
+ bool useWideIRMemcpyLoopLowering() const;
+
/// \returns True if the two functions have compatible attributes for inlining
/// purposes.
bool areInlineCompatible(const Function *Caller,
@@ -953,6 +975,12 @@ public:
virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) = 0;
+ virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign,
+ unsigned DestAlign) const = 0;
+ virtual void getMemcpyLoopResidualLoweringType(
+ SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
+ unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
virtual bool areInlineCompatible(const Function *Caller,
const Function *Callee) const = 0;
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
@@ -1266,6 +1294,19 @@ public:
Type *ExpectedType) override {
return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign,
+ unsigned DestAlign) const override {
+ return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
+ }
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const override {
+ Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
+ SrcAlign, DestAlign);
+ }
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const override {
return Impl.areInlineCompatible(Caller, Callee);
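A hypothetical override of the two new memcpy-lowering hooks above (the enclosing class and the alignment policy are illustrative, not taken from any in-tree target): copy in i32 chunks when both sides are at least 4-byte aligned, otherwise and for the residual bytes fall back to i8.

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Type.h"
  #include "llvm/IR/Value.h"
  using namespace llvm;

  struct MyTargetTTIImpl /* : BasicTTIImplBase<MyTargetTTIImpl> in a real target */ {
    Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                    unsigned SrcAlign, unsigned DestAlign) const {
      return (SrcAlign >= 4 && DestAlign >= 4) ? Type::getInt32Ty(Context)
                                               : Type::getInt8Ty(Context);
    }
    void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                           LLVMContext &Context,
                                           unsigned RemainingBytes,
                                           unsigned SrcAlign,
                                           unsigned DestAlign) const {
      for (unsigned I = 0; I != RemainingBytes; ++I)
        OpsOut.push_back(Type::getInt8Ty(Context));
    }
  };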
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 0246fc1c02cc..8740ee92eed5 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -444,6 +444,20 @@ public:
return nullptr;
}
+ Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
+ unsigned SrcAlign, unsigned DestAlign) const {
+ return Type::getInt8Ty(Context);
+ }
+
+ void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
+ LLVMContext &Context,
+ unsigned RemainingBytes,
+ unsigned SrcAlign,
+ unsigned DestAlign) const {
+ for (unsigned i = 0; i != RemainingBytes; ++i)
+ OpsOut.push_back(Type::getInt8Ty(Context));
+ }
+
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const {
return (Caller->getFnAttribute("target-cpu") ==
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index e953ec8ab6ab..f4c57d4289fc 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -523,8 +523,7 @@ template <typename T> class ArrayRef;
/// (A)
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL,
- bool InvertAPred = false,
- unsigned Depth = 0,
+ bool LHSIsFalse = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
diff --git a/include/llvm/BinaryFormat/Wasm.h b/include/llvm/BinaryFormat/Wasm.h
index eef473b20dde..23e30b7a868d 100644
--- a/include/llvm/BinaryFormat/Wasm.h
+++ b/include/llvm/BinaryFormat/Wasm.h
@@ -94,7 +94,7 @@ struct WasmFunction {
};
struct WasmDataSegment {
- uint32_t Index;
+ uint32_t MemoryIndex;
WasmInitExpr Offset;
ArrayRef<uint8_t> Content;
};
@@ -107,7 +107,7 @@ struct WasmElemSegment {
struct WasmRelocation {
uint32_t Type; // The type of the relocation.
- int32_t Index; // Index into function to global index space.
+ uint32_t Index; // Index into function to global index space.
uint64_t Offset; // Offset from the start of the section.
int64_t Addend; // A value to add to the symbol.
};
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 5435e48ff424..3777f956cf27 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -59,6 +59,8 @@ enum BlockIDs {
FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,
SYMTAB_BLOCK_ID,
+
+ SYNC_SCOPE_NAMES_BLOCK_ID,
};
/// Identification block contains a string that describes the producer details,
@@ -172,6 +174,10 @@ enum OperandBundleTagCode {
OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
};
+enum SyncScopeNameCode {
+ SYNC_SCOPE_NAME = 1,
+};
+
// Value symbol table codes.
enum ValueSymtabCodes {
VST_CODE_ENTRY = 1, // VST_ENTRY: [valueid, namechar x N]
@@ -404,12 +410,6 @@ enum AtomicOrderingCodes {
ORDERING_SEQCST = 6
};
-/// Encoded SynchronizationScope values.
-enum AtomicSynchScopeCodes {
- SYNCHSCOPE_SINGLETHREAD = 0,
- SYNCHSCOPE_CROSSTHREAD = 1
-};
-
/// Markers and flags for call instruction.
enum CallMarkersFlags {
CALL_TAIL = 0,
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index c898667f1474..60bbc9aaa5bd 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -608,8 +608,8 @@ private:
// Internal Implementation Details
//===------------------------------------------------------------------===//
- /// This emits visibility information about symbol, if this is suported by the
- /// target.
+ /// This emits visibility information about symbol, if this is supported by
+ /// the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
bool IsDefinition = true) const;
diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h
index a740df96899d..b59fd60e8aed 100644
--- a/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/include/llvm/CodeGen/BasicTTIImpl.h
@@ -428,7 +428,7 @@ public:
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
- bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
+ bool IsFloat = Ty->isFPOrFPVectorTy();
// Assume that floating point arithmetic operations cost twice as much as
// integer operations.
unsigned OpCost = (IsFloat ? 2 : 1);
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index ec60123e54b1..59a4073646eb 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -16,14 +16,17 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
+#include "llvm/ADT/SmallVector.h"
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
+#include <vector>
namespace llvm {
+class LLT;
class MachineInstr;
class MachineInstrBuilder;
class MachineOperand;
@@ -58,6 +61,131 @@ public:
}
};
+enum {
+ /// Record the specified instruction
+ /// - NewInsnID - Instruction ID to define
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_RecordInsn,
+
+ /// Check the feature bits
+ /// - Expected features
+ GIM_CheckFeatures,
+
+ /// Check the opcode on the specified instruction
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ GIM_CheckOpcode,
+ /// Check the instruction has the right number of operands
+ /// - InsnID - Instruction ID
+ /// - Expected number of operands
+ GIM_CheckNumOperands,
+
+ /// Check the type for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected type
+ GIM_CheckType,
+ /// Check the register bank for the specified operand
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected register bank (specified as a register class)
+ GIM_CheckRegBankForClass,
+ /// Check the operand matches a complex predicate
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - RendererID - The renderer to hold the result
+ /// - Complex predicate ID
+ GIM_CheckComplexPattern,
+ /// Check the operand is a specific integer
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckConstantInt,
+ /// Check the operand is a specific literal integer (i.e. MO.isImm() or MO.isCImm() is true).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected integer
+ GIM_CheckLiteralInt,
+ /// Check the operand is a specific intrinsic ID
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - Expected Intrinsic ID
+ GIM_CheckIntrinsicID,
+ /// Check the specified operand is an MBB
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ GIM_CheckIsMBB,
+
+ /// Check if the specified operand is safe to fold into the current
+ /// instruction.
+ /// - InsnID - Instruction ID
+ GIM_CheckIsSafeToFold,
+
+ //=== Renderers ===
+
+ /// Mutate an instruction
+ /// - NewInsnID - Instruction ID to define
+ /// - OldInsnID - Instruction ID to mutate
+ /// - NewOpcode - The new opcode to use
+ GIR_MutateOpcode,
+ /// Build a new instruction
+ /// - InsnID - Instruction ID to define
+ /// - Opcode - The new opcode to use
+ GIR_BuildMI,
+
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ GIR_Copy,
+ /// Copy an operand to the specified instruction
+ /// - NewInsnID - Instruction ID to modify
+ /// - OldInsnID - Instruction ID to copy from
+ /// - OpIdx - The operand to copy
+ /// - SubRegIdx - The subregister to copy
+ GIR_CopySubReg,
+ /// Add an implicit register def to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitDef,
+ /// Add an implicit register use to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddImplicitUse,
+ /// Add an register to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RegNum - The register to add
+ GIR_AddRegister,
+ /// Add an immediate to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - Imm - The immediate to add
+ GIR_AddImm,
+ /// Render complex operands to the specified instruction
+ /// - InsnID - Instruction ID to modify
+ /// - RendererID - The renderer to call
+ GIR_ComplexRenderer,
+
+ /// Constrain an instruction operand to a register class.
+ /// - InsnID - Instruction ID to modify
+ /// - OpIdx - Operand index
+ /// - RCEnum - Register class enumeration value
+ GIR_ConstrainOperandRC,
+ /// Constrain an instructions operands according to the instruction
+ /// description.
+ /// - InsnID - Instruction ID to modify
+ GIR_ConstrainSelectedInstOperands,
+ /// Merge all memory operands into instruction.
+ /// - InsnID - Instruction ID to modify
+ GIR_MergeMemOperands,
+ /// Erase from parent.
+ /// - InsnID - Instruction ID to erase
+ GIR_EraseFromParent,
+
+ /// A successful emission
+ GIR_Done,
+};
+
/// Provides the logic to select generic machine instructions.
class InstructionSelector {
public:
@@ -78,9 +206,39 @@ public:
protected:
using ComplexRendererFn = std::function<void(MachineInstrBuilder &)>;
+ using RecordedMIVector = SmallVector<MachineInstr *, 4>;
+ using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
+
+ struct MatcherState {
+ std::vector<ComplexRendererFn> Renderers;
+ RecordedMIVector MIs;
+
+ MatcherState(unsigned MaxRenderers);
+ };
+public:
+ template <class PredicateBitset, class ComplexMatcherMemFn>
+ struct MatcherInfoTy {
+ const LLT *TypeObjects;
+ const PredicateBitset *FeatureBitsets;
+ const std::vector<ComplexMatcherMemFn> ComplexPredicates;
+ };
+
+protected:
InstructionSelector();
+ /// Execute a given matcher table and return true if the match was successful
+ /// and false otherwise.
+ template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn>
+ bool executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI,
+ const PredicateBitset &AvailableFeatures) const;
+
/// Constrain a register operand of an instruction \p I to a specified
/// register class. This could involve inserting COPYs before (for uses) or
/// after (for defs) and may replace the operand of \p I.
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
new file mode 100644
index 000000000000..98b6b859b9e2
--- /dev/null
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -0,0 +1,337 @@
+//==-- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file declares the API for the instruction selector.
+/// This class is responsible for selecting machine instructions.
+/// It's implemented by the target. It's used by the InstructionSelect pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
+
+namespace llvm {
+template <class TgtInstructionSelector, class PredicateBitset,
+ class ComplexMatcherMemFn>
+bool InstructionSelector::executeMatchTable(
+ TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
+ const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
+ const int64_t *MatchTable, const TargetInstrInfo &TII,
+ MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
+ const RegisterBankInfo &RBI,
+ const PredicateBitset &AvailableFeatures) const {
+ const int64_t *Command = MatchTable;
+ while (true) {
+ switch (*Command++) {
+ case GIM_RecordInsn: {
+ int64_t NewInsnID = *Command++;
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+
+ // As an optimisation we require that MIs[0] is always the root. Refuse
+ // any attempt to modify it.
+ assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
+ (void)NewInsnID;
+
+ MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!MO.isReg()) {
+ DEBUG(dbgs() << "Rejected (not a register)\n");
+ return false;
+ }
+ if (TRI.isPhysicalRegister(MO.getReg())) {
+ DEBUG(dbgs() << "Rejected (is a physical register)\n");
+ return false;
+ }
+
+ assert((size_t)NewInsnID == State.MIs.size() &&
+ "Expected to store MIs in order");
+ State.MIs.push_back(MRI.getVRegDef(MO.getReg()));
+ DEBUG(dbgs() << "MIs[" << NewInsnID << "] = GIM_RecordInsn(" << InsnID
+ << ", " << OpIdx << ")\n");
+ break;
+ }
+
+ case GIM_CheckFeatures: {
+ int64_t ExpectedBitsetID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckFeatures(ExpectedBitsetID=" << ExpectedBitsetID
+ << ")\n");
+ if ((AvailableFeatures & MatcherInfo.FeatureBitsets[ExpectedBitsetID]) !=
+ MatcherInfo.FeatureBitsets[ExpectedBitsetID]) {
+ DEBUG(dbgs() << "Rejected\n");
+ return false;
+ }
+ break;
+ }
+
+ case GIM_CheckOpcode: {
+ int64_t InsnID = *Command++;
+ int64_t Expected = *Command++;
+
+ unsigned Opcode = State.MIs[InsnID]->getOpcode();
+ DEBUG(dbgs() << "GIM_CheckOpcode(MIs[" << InsnID << "], ExpectedOpcode="
+ << Expected << ") // Got=" << Opcode << "\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (Opcode != Expected)
+ return false;
+ break;
+ }
+ case GIM_CheckNumOperands: {
+ int64_t InsnID = *Command++;
+ int64_t Expected = *Command++;
+ DEBUG(dbgs() << "GIM_CheckNumOperands(MIs[" << InsnID
+ << "], Expected=" << Expected << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (State.MIs[InsnID]->getNumOperands() != Expected)
+ return false;
+ break;
+ }
+
+ case GIM_CheckType: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t TypeID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckType(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "), TypeID=" << TypeID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg()) !=
+ MatcherInfo.TypeObjects[TypeID])
+ return false;
+ break;
+ }
+ case GIM_CheckRegBankForClass: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RCEnum = *Command++;
+ DEBUG(dbgs() << "GIM_CheckRegBankForClass(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx << "), RCEnum=" << RCEnum
+ << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (&RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
+ RBI.getRegBank(State.MIs[InsnID]->getOperand(OpIdx).getReg(), MRI, TRI))
+ return false;
+ break;
+ }
+ case GIM_CheckComplexPattern: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RendererID = *Command++;
+ int64_t ComplexPredicateID = *Command++;
+ DEBUG(dbgs() << "State.Renderers[" << RendererID
+ << "] = GIM_CheckComplexPattern(MIs[" << InsnID
+ << "]->getOperand(" << OpIdx
+ << "), ComplexPredicateID=" << ComplexPredicateID << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ // FIXME: Use std::invoke() when it's available.
+ if (!(State.Renderers[RendererID] =
+ (ISel.*MatcherInfo.ComplexPredicates[ComplexPredicateID])(
+ State.MIs[InsnID]->getOperand(OpIdx))))
+ return false;
+ break;
+ }
+ case GIM_CheckConstantInt: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckConstantInt(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!isOperandImmEqual(State.MIs[InsnID]->getOperand(OpIdx), Value, MRI))
+ return false;
+ break;
+ }
+ case GIM_CheckLiteralInt: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckLiteralInt(MIs[" << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!OM.isCImm() || !OM.getCImm()->equalsInt(Value))
+ return false;
+ break;
+ }
+ case GIM_CheckIntrinsicID: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t Value = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIntrinsicID(MIs[" << InsnID << "]->getOperand(" << OpIdx
+ << "), Value=" << Value << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
+ if (!OM.isIntrinsicID() || OM.getIntrinsicID() != Value)
+ return false;
+ break;
+ }
+ case GIM_CheckIsMBB: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIsMBB(MIs[" << InsnID << "]->getOperand("
+ << OpIdx << "))\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB())
+ return false;
+ break;
+ }
+
+ case GIM_CheckIsSafeToFold: {
+ int64_t InsnID = *Command++;
+ DEBUG(dbgs() << "GIM_CheckIsSafeToFold(MIs[" << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ if (!isObviouslySafeToFold(*State.MIs[InsnID]))
+ return false;
+ break;
+ }
+
+ case GIR_MutateOpcode: {
+ int64_t OldInsnID = *Command++;
+ int64_t NewInsnID = *Command++;
+ int64_t NewOpcode = *Command++;
+ assert((size_t)NewInsnID == OutMIs.size() &&
+ "Expected to store MIs in order");
+ OutMIs.push_back(
+ MachineInstrBuilder(*State.MIs[OldInsnID]->getParent()->getParent(),
+ State.MIs[OldInsnID]));
+ OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
+ DEBUG(dbgs() << "GIR_MutateOpcode(OutMIs[" << NewInsnID << "], MIs["
+ << OldInsnID << "], " << NewOpcode << ")\n");
+ break;
+ }
+ case GIR_BuildMI: {
+ int64_t InsnID = *Command++;
+ int64_t Opcode = *Command++;
+ assert((size_t)InsnID == OutMIs.size() &&
+ "Expected to store MIs in order");
+ (void)InsnID;
+ OutMIs.push_back(BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
+ State.MIs[0]->getDebugLoc(), TII.get(Opcode)));
+ DEBUG(dbgs() << "GIR_BuildMI(OutMIs[" << InsnID << "], " << Opcode
+ << ")\n");
+ break;
+ }
+
+ case GIR_Copy: {
+ int64_t NewInsnID = *Command++;
+ int64_t OldInsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
+ DEBUG(dbgs() << "GIR_Copy(OutMIs[" << NewInsnID << "], MIs[" << OldInsnID
+ << "], " << OpIdx << ")\n");
+ break;
+ }
+ case GIR_CopySubReg: {
+ int64_t NewInsnID = *Command++;
+ int64_t OldInsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t SubRegIdx = *Command++;
+ assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
+ OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
+ 0, SubRegIdx);
+ DEBUG(dbgs() << "GIR_CopySubReg(OutMIs[" << NewInsnID << "], MIs["
+ << OldInsnID << "], " << OpIdx << ", " << SubRegIdx
+ << ")\n");
+ break;
+ }
+ case GIR_AddImplicitDef: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
+ DEBUG(dbgs() << "GIR_AddImplicitDef(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddImplicitUse: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
+ DEBUG(dbgs() << "GIR_AddImplicitUse(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddRegister: {
+ int64_t InsnID = *Command++;
+ int64_t RegNum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addReg(RegNum);
+ DEBUG(dbgs() << "GIR_AddRegister(OutMIs[" << InsnID << "], " << RegNum
+ << ")\n");
+ break;
+ }
+ case GIR_AddImm: {
+ int64_t InsnID = *Command++;
+ int64_t Imm = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ OutMIs[InsnID].addImm(Imm);
+ DEBUG(dbgs() << "GIR_AddImm(OutMIs[" << InsnID << "], " << Imm << ")\n");
+ break;
+ }
+ case GIR_ComplexRenderer: {
+ int64_t InsnID = *Command++;
+ int64_t RendererID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ State.Renderers[RendererID](OutMIs[InsnID]);
+ DEBUG(dbgs() << "GIR_ComplexRenderer(OutMIs[" << InsnID << "], "
+ << RendererID << ")\n");
+ break;
+ }
+
+ case GIR_ConstrainOperandRC: {
+ int64_t InsnID = *Command++;
+ int64_t OpIdx = *Command++;
+ int64_t RCEnum = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
+ *TRI.getRegClass(RCEnum), TII, TRI, RBI);
+ DEBUG(dbgs() << "GIR_ConstrainOperandRC(OutMIs[" << InsnID << "], "
+ << OpIdx << ", " << RCEnum << ")\n");
+ break;
+ }
+ case GIR_ConstrainSelectedInstOperands: {
+ int64_t InsnID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
+ RBI);
+ DEBUG(dbgs() << "GIR_ConstrainSelectedInstOperands(OutMIs[" << InsnID
+ << "])\n");
+ break;
+ }
+ case GIR_MergeMemOperands: {
+ int64_t InsnID = *Command++;
+ assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
+ for (const auto *FromMI : State.MIs)
+ for (const auto &MMO : FromMI->memoperands())
+ OutMIs[InsnID].addMemOperand(MMO);
+ DEBUG(dbgs() << "GIR_MergeMemOperands(OutMIs[" << InsnID << "])\n");
+ break;
+ }
+ case GIR_EraseFromParent: {
+ int64_t InsnID = *Command++;
+ assert(State.MIs[InsnID] &&
+ "Attempted to erase an undefined instruction");
+ State.MIs[InsnID]->eraseFromParent();
+ DEBUG(dbgs() << "GIR_EraseFromParent(MIs[" << InsnID << "])\n");
+ break;
+ }
+
+ case GIR_Done:
+ DEBUG(dbgs() << "GIR_Done");
+ return true;
+
+ default:
+ llvm_unreachable("Unexpected command");
+ }
+ }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
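To make the encoding concrete, here is an illustrative, hand-written table in the shape executeMatchTable consumes. Real tables are emitted by TableGen; the type ID, register-class enum and the MYTGT_ADDrr opcode below are placeholders rather than values from any real target.

  #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
  #include "llvm/Target/TargetOpcodes.h"
  using namespace llvm;

  // Placeholder for a target-defined opcode that a generated table would use.
  static const int64_t MYTGT_ADDrr = 0;

  static const int64_t ExampleMatchTable[] = {
      GIM_CheckNumOperands, /*InsnID*/ 0, /*Expected*/ 3,
      GIM_CheckOpcode, /*InsnID*/ 0, TargetOpcode::G_ADD,
      GIM_CheckType, /*InsnID*/ 0, /*OpIdx*/ 0, /*TypeID*/ 0,
      GIM_CheckRegBankForClass, /*InsnID*/ 0, /*OpIdx*/ 0, /*RCEnum*/ 1,
      GIR_MutateOpcode, /*OldInsnID*/ 0, /*NewInsnID*/ 0, MYTGT_ADDrr,
      GIR_ConstrainSelectedInstOperands, /*InsnID*/ 0,
      GIR_Done,
  };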
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 5197ba869c0a..1fd45b52e3ac 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -101,11 +101,11 @@ private:
const LegalizerInfo &LI;
};
-/// Helper function that replaces \p MI with a libcall.
+/// Helper function that creates the given libcall.
LegalizerHelper::LegalizeResult
-replaceWithLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
- RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
- ArrayRef<CallLowering::ArgInfo> Args);
+createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args);
} // End namespace llvm.
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index c9327d50432e..85e6fef1f3c2 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
@@ -59,6 +60,21 @@ class MachineIRBuilder {
}
void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
+ MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
+
+ unsigned getDestFromArg(unsigned Reg) { return Reg; }
+ unsigned getDestFromArg(LLT Ty) {
+ return getMF().getRegInfo().createGenericVirtualRegister(Ty);
+ }
+ unsigned getDestFromArg(const TargetRegisterClass *RC) {
+ return getMF().getRegInfo().createVirtualRegister(RC);
+ }
+
+ unsigned getRegFromArg(unsigned Reg) { return Reg; }
+
+ unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
+ return MIB->getOperand(0).getReg();
+ }
public:
/// Getter for the function we currently build.
@@ -120,6 +136,22 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildInstr(unsigned Opcode);
+ /// DAG like Generic method for building arbitrary instructions as above.
+ /// \Opc opcode for the instruction.
+ /// \Ty Either LLT/TargetRegisterClass/unsigned types for Dst
+ /// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
+ /// Uses of type MachineInstrBuilder will perform
+ /// getOperand(0).getReg() to convert to register.
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
+ UseArgsTy &&... Args) {
+ auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
+ unsigned It[] = {(getRegFromArg(Args))...};
+ for (const auto &i : It)
+ MIB.addUse(i);
+ return MIB;
+ }
+
/// Build but don't insert <empty> = \p Opcode <empty>.
///
/// \pre setMF, setBasicBlock or setMI must have been called.
@@ -188,6 +220,11 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
unsigned Op1);
+ template <typename DstTy, typename... UseArgsTy>
+ MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
+ unsigned Res = getDestFromArg(Ty);
+ return buildAdd(Res, (getRegFromArg(UseArgs))...);
+ }
/// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
///
@@ -295,6 +332,18 @@ public:
MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
unsigned Op1);
+ /// Build and insert \p Res<def> = G_OR \p Op0, \p Op1
+ ///
+ /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
+ /// Op1.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
+ /// with the same (scalar or vector) type).
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
+
/// Build and insert \p Res<def> = G_ANYEXT \p Op0
///
/// G_ANYEXT produces a register of the specified width, with bits 0 to
@@ -416,6 +465,10 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
+ template <typename DstType>
+ MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
+ return buildConstant(getDestFromArg(Res), Val);
+ }
/// Build and insert \p Res = G_FCONSTANT \p Val
///
/// G_FCONSTANT is a floating-point constant with the specified size and
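A minimal sketch of the DAG-style builder calls above (MIRBuilder is an already-positioned MachineIRBuilder, and VReg0/VReg1 are existing 32-bit generic virtual registers):

  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  #include "llvm/Target/TargetOpcodes.h"
  using namespace llvm;

  static void buildExample(MachineIRBuilder &MIRBuilder, unsigned VReg0,
                           unsigned VReg1) {
    LLT S32 = LLT::scalar(32);
    auto Cst = MIRBuilder.buildConstant(S32, 42);    // LLT dest => fresh vreg
    auto Sum = MIRBuilder.buildAdd(S32, VReg0, Cst); // MIB use => its def reg
    MIRBuilder.buildInstr(TargetOpcode::G_OR, S32, Sum, VReg1);
  }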
diff --git a/include/llvm/CodeGen/LiveRegUnits.h b/include/llvm/CodeGen/LiveRegUnits.h
index fa1ec867ea3d..c28b1a06854f 100644
--- a/include/llvm/CodeGen/LiveRegUnits.h
+++ b/include/llvm/CodeGen/LiveRegUnits.h
@@ -93,12 +93,14 @@ public:
}
/// Updates liveness when stepping backwards over the instruction \p MI.
+ /// This removes all register units defined or clobbered in \p MI and then
+ /// adds the units used (as in use operands) in \p MI.
void stepBackward(const MachineInstr &MI);
- /// Mark all register units live during instruction \p MI.
- /// This can be used to accumulate live/unoccupied registers over a range of
- /// instructions.
- void accumulateBackward(const MachineInstr &MI);
+ /// Adds all register units used, defined or clobbered in \p MI.
+ /// This is useful when walking over a range of instruction to find registers
+ /// unused over the whole range.
+ void accumulate(const MachineInstr &MI);
/// Adds registers living out of block \p MBB.
/// Live out registers are the union of the live-in registers of the successor
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index f67da7b01c54..19173fa39bdc 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -650,7 +650,7 @@ public:
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index 78adce507b8c..a9de0db05d72 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -114,6 +114,9 @@ public:
MOInvariant = 1u << 5,
// Reserved for use by target-specific passes.
+ // Targets may override getSerializableMachineMemOperandTargetFlags() to
+ // enable MIR serialization/parsing of these flags. If more of these flags
+ // are added, the MIR printing/parsing code will need to be updated as well.
MOTargetFlag1 = 1u << 6,
MOTargetFlag2 = 1u << 7,
MOTargetFlag3 = 1u << 8,
@@ -124,8 +127,8 @@ public:
private:
/// Atomic information for this memory operation.
struct MachineAtomicInfo {
- /// Synchronization scope for this memory operation.
- unsigned SynchScope : 1; // enum SynchronizationScope
+ /// Synchronization scope ID for this memory operation.
+ unsigned SSID : 8; // SyncScope::ID
/// Atomic ordering requirements for this memory operation. For cmpxchg
/// atomic operations, atomic ordering requirements when store occurs.
unsigned Ordering : 4; // enum AtomicOrdering
@@ -152,7 +155,7 @@ public:
unsigned base_alignment,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
@@ -202,9 +205,9 @@ public:
/// Return the range tag for the memory reference.
const MDNode *getRanges() const { return Ranges; }
- /// Return the synchronization scope for this memory operation.
- SynchronizationScope getSynchScope() const {
- return static_cast<SynchronizationScope>(AtomicInfo.SynchScope);
+ /// Returns the synchronization scope ID for this memory operation.
+ SyncScope::ID getSyncScopeID() const {
+ return static_cast<SyncScope::ID>(AtomicInfo.SSID);
}
/// Return the atomic ordering requirements for this memory operation. For
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
index 8c3aacaa8efc..08151be11083 100644
--- a/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -340,6 +340,18 @@ namespace RTLIB {
MEMCPY_ELEMENT_UNORDERED_ATOMIC_8,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_16,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8,
+ MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16,
+
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_1,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_2,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_4,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_8,
+ MEMSET_ELEMENT_UNORDERED_ATOMIC_16,
+
// EXCEPTION HANDLING
UNWIND_RESUME,
@@ -515,6 +527,17 @@ namespace RTLIB {
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
/// UNKNOW_LIBCALL if there is none.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+ /// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return
+ /// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+ /// UNKNOW_LIBCALL if there is none.
+ Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
+ /// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return
+ /// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+ /// UNKNOW_LIBCALL if there is none.
+ Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
+
}
}
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 4d72eda5c71a..25afc5b506df 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -235,6 +235,9 @@ class TargetRegisterInfo;
"SDep::Output edge cannot use the zero register!");
Contents.Reg = Reg;
}
+
+ raw_ostream &print(raw_ostream &O,
+ const TargetRegisterInfo *TRI = nullptr) const;
};
template <>
@@ -458,7 +461,10 @@ class TargetRegisterInfo;
void dump(const ScheduleDAG *G) const;
void dumpAll(const ScheduleDAG *G) const;
- void print(raw_ostream &O, const ScheduleDAG *G) const;
+ raw_ostream &print(raw_ostream &O,
+ const SUnit *N = nullptr,
+ const SUnit *X = nullptr) const;
+ raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
private:
void ComputeDepth();
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index f3f3003b7e20..55a23c3cca9b 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -927,7 +927,7 @@ public:
SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDVTList VTs, SDValue Chain, SDValue Ptr,
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
@@ -937,7 +937,7 @@ public:
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, const Value *PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index d9f8af0e21d1..db42fb6c170c 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1213,8 +1213,8 @@ public:
/// Returns the Ranges that describes the dereference.
const MDNode *getRanges() const { return MMO->getRanges(); }
- /// Return the synchronization scope for this memory operation.
- SynchronizationScope getSynchScope() const { return MMO->getSynchScope(); }
+ /// Returns the synchronization scope ID for this memory operation.
+ SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
/// Return the atomic ordering requirements for this memory operation. For
/// cmpxchg atomic operations, return the atomic ordering requirements when
@@ -1432,8 +1432,8 @@ public:
int64_t getSExtValue() const { return Value->getSExtValue(); }
bool isOne() const { return Value->isOne(); }
- bool isNullValue() const { return Value->isNullValue(); }
- bool isAllOnesValue() const { return Value->isAllOnesValue(); }
+ bool isNullValue() const { return Value->isZero(); }
+ bool isAllOnesValue() const { return Value->isMinusOne(); }
bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
index 7941af8be8af..cdfc1745cea5 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolRecord.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -735,6 +735,10 @@ public:
uint16_t VersionBackendQFE;
StringRef Version;
+ void setLanguage(SourceLanguage Lang) {
+ Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
+ }
+
uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
diff --git a/include/llvm/DebugInfo/CodeView/TypeIndex.h b/include/llvm/DebugInfo/CodeView/TypeIndex.h
index 10d51c2d6244..e0c2226bdbd7 100644
--- a/include/llvm/DebugInfo/CodeView/TypeIndex.h
+++ b/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -10,9 +10,11 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Endian.h"
#include <cassert>
#include <cinttypes>
+#include <functional>
namespace llvm {
@@ -265,6 +267,23 @@ struct TypeIndexOffset {
void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName, TypeIndex TI,
TypeCollection &Types);
}
-}
+
+template <> struct DenseMapInfo<codeview::TypeIndex> {
+ static inline codeview::TypeIndex getEmptyKey() {
+ return codeview::TypeIndex{DenseMapInfo<uint32_t>::getEmptyKey()};
+ }
+ static inline codeview::TypeIndex getTombstoneKey() {
+ return codeview::TypeIndex{DenseMapInfo<uint32_t>::getTombstoneKey()};
+ }
+ static unsigned getHashValue(const codeview::TypeIndex &TI) {
+ return DenseMapInfo<uint32_t>::getHashValue(TI.getIndex());
+ }
+ static bool isEqual(const codeview::TypeIndex &LHS,
+ const codeview::TypeIndex &RHS) {
+ return LHS == RHS;
+ }
+};
+
+} // namespace llvm
#endif
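With the DenseMapInfo specialization above, codeview::TypeIndex can be used directly as a DenseMap key; a small sketch (the particular remapping is made up):

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/DebugInfo/CodeView/TypeIndex.h"
  using namespace llvm;

  static bool recordRemapping() {
    DenseMap<codeview::TypeIndex, codeview::TypeIndex> TypeMap;
    TypeMap[codeview::TypeIndex(0x1000)] = codeview::TypeIndex(0x1001);
    return TypeMap.count(codeview::TypeIndex(0x1000)) == 1;
  }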
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index 4126e245ff13..936813dc6abc 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -204,7 +204,9 @@ public:
/// need to be consistent with the addresses used to query the DIContext and
/// the output of this function should be deterministic, i.e. repeated calls with
/// the same Sec should give the same address.
- virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const = 0;
+ virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const {
+ return 0;
+ }
/// If conveniently available, return the content of the given Section.
///
@@ -221,12 +223,28 @@ public:
return false;
}
+ // FIXME: This is untested and unused anywhere in the LLVM project, it's
+ // used/needed by Julia (an external project). It should have some coverage
+ // (at least tests, but ideally example functionality).
/// Obtain a copy of this LoadedObjectInfo.
- ///
- /// The caller is responsible for deallocation once the copy is no longer required.
virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
};
+template <typename Derived, typename Base = LoadedObjectInfo>
+struct LoadedObjectInfoHelper : Base {
+protected:
+ LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
+ LoadedObjectInfoHelper() = default;
+
+public:
+ template <typename... Ts>
+ LoadedObjectInfoHelper(Ts &&... Args) : Base(std::forward<Ts>(Args)...) {}
+
+ std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
+ return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
+ }
+};
+
} // end namespace llvm
#endif // LLVM_DEBUGINFO_DICONTEXT_H
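
An illustrative sketch of the new LoadedObjectInfoHelper CRTP base: a client derives from it and gets clone() for free. The struct name and the trivial override are placeholders, not part of LLVM.

#include "llvm/DebugInfo/DIContext.h"

namespace {
struct ClientObjectInfo : llvm::LoadedObjectInfoHelper<ClientObjectInfo> {
  // clone() is inherited from the helper and returns a copy of this object.
  uint64_t
  getSectionLoadAddress(const llvm::object::SectionRef &Sec) const override {
    return 0; // placeholder: a real client would map Sec to its load address
  }
};
} // end anonymous namespace
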
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index 739aa1f9ee74..ee2e805050c0 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -226,11 +226,7 @@ public:
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0;
virtual const DWARFSection &getInfoSection() = 0;
-
- using TypeSectionMap = MapVector<object::SectionRef, DWARFSection,
- std::map<object::SectionRef, unsigned>>;
-
- virtual const TypeSectionMap &getTypesSections() = 0;
+ virtual void forEachTypesSections(function_ref<void(DWARFSection &)> F) = 0;
virtual StringRef getAbbrevSection() = 0;
virtual const DWARFSection &getLocSection() = 0;
virtual StringRef getARangeSection() = 0;
@@ -252,7 +248,8 @@ public:
// Sections for DWARF5 split dwarf proposal.
virtual const DWARFSection &getInfoDWOSection() = 0;
- virtual const TypeSectionMap &getTypesDWOSections() = 0;
+ virtual void
+ forEachTypesDWOSections(function_ref<void(DWARFSection &)> F) = 0;
virtual StringRef getAbbrevDWOSection() = 0;
virtual const DWARFSection &getLineDWOSection() = 0;
virtual const DWARFSection &getLocDWOSection() = 0;
@@ -294,6 +291,9 @@ enum class ErrorPolicy { Halt, Continue };
class DWARFContextInMemory : public DWARFContext {
virtual void anchor();
+ using TypeSectionMap = MapVector<object::SectionRef, DWARFSection,
+ std::map<object::SectionRef, unsigned>>;
+
StringRef FileName;
bool IsLittleEndian;
uint8_t AddressSize;
@@ -338,7 +338,8 @@ class DWARFContextInMemory : public DWARFContext {
SmallVector<SmallString<32>, 4> UncompressedSections;
- StringRef *MapSectionToMember(StringRef Name);
+ DWARFSection *mapNameToDWARFSection(StringRef Name);
+ StringRef *mapSectionToMember(StringRef Name);
/// If Sec is compressed section, decompresses and updates its contents
/// provided by Data. Otherwise leaves it unchanged.
@@ -362,7 +363,10 @@ public:
bool isLittleEndian() const override { return IsLittleEndian; }
uint8_t getAddressSize() const override { return AddressSize; }
const DWARFSection &getInfoSection() override { return InfoSection; }
- const TypeSectionMap &getTypesSections() override { return TypesSections; }
+ void forEachTypesSections(function_ref<void(DWARFSection &)> F) override {
+ for (auto &P : TypesSections)
+ F(P.second);
+ }
StringRef getAbbrevSection() override { return AbbrevSection; }
const DWARFSection &getLocSection() override { return LocSection; }
StringRef getARangeSection() override { return ARangeSection; }
@@ -389,8 +393,9 @@ public:
// Sections for DWARF5 split dwarf proposal.
const DWARFSection &getInfoDWOSection() override { return InfoDWOSection; }
- const TypeSectionMap &getTypesDWOSections() override {
- return TypesDWOSections;
+ void forEachTypesDWOSections(function_ref<void(DWARFSection &)> F) override {
+ for (auto &P : TypesDWOSections)
+ F(P.second);
}
StringRef getAbbrevDWOSection() override { return AbbrevDWOSection; }
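
A sketch of the callback interface that replaces the removed TypeSectionMap accessors; the helper function is hypothetical.

#include "llvm/DebugInfo/DWARF/DWARFContext.h"

static void visitTypeSections(llvm::DWARFContext &Ctx) {
  Ctx.forEachTypesSections([](llvm::DWARFSection &S) {
    (void)S.Data; // e.g. parse type units out of this section's contents
  });
}
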
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
index e4cb1b24e30d..c918a5d5e976 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -47,6 +47,7 @@ public:
DbiModuleDescriptorBuilder &
operator=(const DbiModuleDescriptorBuilder &) = delete;
+ void setPdbFilePathNI(uint32_t NI);
void setObjFileName(StringRef Name);
void addSymbol(codeview::CVSymbol Symbol);
@@ -68,6 +69,10 @@ public:
uint32_t calculateSerializedLength() const;
+ /// Return the offset within the module symbol stream of the next symbol
+ /// record passed to addSymbol. Add four to account for the signature.
+ uint32_t getNextSymbolOffset() const { return SymbolByteSize + 4; }
+
void finalize();
Error finalizeMsfLayout();
@@ -81,6 +86,7 @@ private:
msf::MSFBuilder &MSF;
uint32_t SymbolByteSize = 0;
+ uint32_t PdbFilePathNI = 0;
std::string ModuleName;
std::string ObjFileName;
std::vector<std::string> SourceFiles;
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index 3bf790726656..4be113f28d6f 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -83,6 +83,8 @@ public:
FixedStreamArray<SecMapEntry> getSectionMap() const;
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
+ Expected<StringRef> getECName(uint32_t NI) const;
+
private:
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index 744411854181..63eb34f0326a 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -15,6 +15,7 @@
#include "llvm/Support/Error.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
+#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/BinaryByteStream.h"
@@ -54,8 +55,13 @@ public:
// Add given bytes as a new stream.
Error addDbgStream(pdb::DbgHeaderType Type, ArrayRef<uint8_t> Data);
+ uint32_t addECName(StringRef Name);
+
uint32_t calculateSerializedLength() const;
+ void setPublicsStreamIndex(uint32_t Index);
+ void setSymbolRecordStreamIndex(uint32_t Index);
+
Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
Error addModuleSourceFile(StringRef Module, StringRef File);
Error addModuleSourceFile(DbiModuleDescriptorBuilder &Module, StringRef File);
@@ -75,7 +81,7 @@ public:
private:
struct DebugStream {
ArrayRef<uint8_t> Data;
- uint16_t StreamNumber = 0;
+ uint16_t StreamNumber = kInvalidStreamIndex;
};
Error finalize();
@@ -87,7 +93,6 @@ private:
uint32_t calculateNamesBufferSize() const;
uint32_t calculateDbgStreamsSize() const;
- Error generateModiSubstream();
Error generateFileInfoSubstream();
msf::MSFBuilder &Msf;
@@ -100,6 +105,8 @@ private:
uint16_t PdbDllRbld;
uint16_t Flags;
PDB_Machine MachineType;
+ uint32_t PublicsStreamIndex = kInvalidStreamIndex;
+ uint32_t SymRecordStreamIndex = kInvalidStreamIndex;
const DbiStreamHeader *Header;
@@ -108,6 +115,7 @@ private:
StringMap<uint32_t> SourceFileNames;
+ PDBStringTableBuilder ECNamesBuilder;
WritableBinaryStreamRef NamesBuffer;
MutableBinaryByteStream FileInfoBuffer;
std::vector<SectionContrib> SectionContribs;
diff --git a/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h b/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
index 25f66240a6a2..17a82b7ce12d 100644
--- a/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
+++ b/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
@@ -44,7 +44,7 @@ public:
bool get(StringRef Stream, uint32_t &StreamNo) const;
void set(StringRef Stream, uint32_t StreamNo);
void remove(StringRef Stream);
-
+ const StringMap<uint32_t> &getStringMap() const { return Mapping; }
iterator_range<StringMapConstIterator<uint32_t>> entries() const;
private:
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
new file mode 100644
index 000000000000..4f532c6e3829
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h
@@ -0,0 +1,49 @@
+//===- NativeBuiltinSymbol.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEBUILTINSYMBOL_H
+
+#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
+
+#include "llvm/DebugInfo/PDB/PDBTypes.h"
+
+namespace llvm {
+namespace pdb {
+
+class NativeSession;
+
+class NativeBuiltinSymbol : public NativeRawSymbol {
+public:
+ NativeBuiltinSymbol(NativeSession &PDBSession, SymIndexId Id,
+ PDB_BuiltinType T, uint64_t L);
+ ~NativeBuiltinSymbol() override;
+
+ virtual std::unique_ptr<NativeRawSymbol> clone() const override;
+
+ void dump(raw_ostream &OS, int Indent) const override;
+
+ PDB_SymType getSymTag() const override;
+
+ PDB_BuiltinType getBuiltinType() const override;
+ bool isConstType() const override;
+ uint64_t getLength() const override;
+ bool isUnalignedType() const override;
+ bool isVolatileType() const override;
+
+protected:
+ NativeSession &Session;
+ PDB_BuiltinType Type;
+ uint64_t Length;
+};
+
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index 1687737f0e7f..bd5c09e5ff76 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeCompilandSymbol : public NativeRawSymbol {
public:
- NativeCompilandSymbol(NativeSession &Session, uint32_t SymbolId,
+ NativeCompilandSymbol(NativeSession &Session, SymIndexId SymbolId,
DbiModuleDescriptor MI);
std::unique_ptr<NativeRawSymbol> clone() const override;
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
index 15bac78df191..ddb7f811da38 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeExeSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeExeSymbol : public NativeRawSymbol {
public:
- NativeExeSymbol(NativeSession &Session, uint32_t SymbolId);
+ NativeExeSymbol(NativeSession &Session, SymIndexId SymbolId);
std::unique_ptr<NativeRawSymbol> clone() const override;
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
index a24a972879d2..66a9eae28e23 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeRawSymbol.h
@@ -19,9 +19,11 @@ namespace pdb {
class NativeSession;
+typedef uint32_t SymIndexId;
+
class NativeRawSymbol : public IPDBRawSymbol {
public:
- NativeRawSymbol(NativeSession &PDBSession, uint32_t SymbolId);
+ NativeRawSymbol(NativeSession &PDBSession, SymIndexId SymbolId);
virtual std::unique_ptr<NativeRawSymbol> clone() const = 0;
@@ -205,7 +207,7 @@ public:
protected:
NativeSession &Session;
- uint32_t SymbolId;
+ SymIndexId SymbolId;
};
} // end namespace pdb
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeSession.h b/include/llvm/DebugInfo/PDB/Native/NativeSession.h
index dd40874dc5f2..b16ce231c349 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeSession.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeSession.h
@@ -10,9 +10,13 @@
#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/NativeBuiltinSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
@@ -35,6 +39,8 @@ public:
std::unique_ptr<PDBSymbolCompiland>
createCompilandSymbol(DbiModuleDescriptor MI);
+ SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI);
+
uint64_t getLoadAddress() const override;
void setLoadAddress(uint64_t Address) override;
std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
@@ -77,6 +83,7 @@ private:
std::unique_ptr<PDBFile> Pdb;
std::unique_ptr<BumpPtrAllocator> Allocator;
std::vector<std::unique_ptr<NativeRawSymbol>> SymbolCache;
+ DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;
};
}
}
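
An illustrative sketch, not the actual implementation: the new TypeIndexToSymbolId member suggests a simple cache in front of symbol creation, along the lines of the following free function.

#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"

static llvm::pdb::SymIndexId
cachedSymbolForType(llvm::DenseMap<llvm::codeview::TypeIndex,
                                   llvm::pdb::SymIndexId> &Cache,
                    llvm::codeview::TypeIndex TI,
                    llvm::pdb::SymIndexId NextId) {
  auto It = Cache.find(TI);
  if (It != Cache.end())
    return It->second; // already materialized: reuse its symbol id
  Cache[TI] = NextId;  // a real implementation would also construct the
  return NextId;       // NativeBuiltinSymbol for TI here
}
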
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h b/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
index cd7d3b063793..2dc23f819d3b 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBFileBuilder.h
@@ -31,11 +31,13 @@ class MSFBuilder;
namespace pdb {
class DbiStreamBuilder;
class InfoStreamBuilder;
+class PublicsStreamBuilder;
class TpiStreamBuilder;
class PDBFileBuilder {
public:
explicit PDBFileBuilder(BumpPtrAllocator &Allocator);
+ ~PDBFileBuilder();
PDBFileBuilder(const PDBFileBuilder &) = delete;
PDBFileBuilder &operator=(const PDBFileBuilder &) = delete;
@@ -47,6 +49,7 @@ public:
TpiStreamBuilder &getTpiBuilder();
TpiStreamBuilder &getIpiBuilder();
PDBStringTableBuilder &getStringTableBuilder();
+ PublicsStreamBuilder &getPublicsBuilder();
Error commit(StringRef Filename);
@@ -61,6 +64,7 @@ private:
std::unique_ptr<msf::MSFBuilder> Msf;
std::unique_ptr<InfoStreamBuilder> Info;
std::unique_ptr<DbiStreamBuilder> Dbi;
+ std::unique_ptr<PublicsStreamBuilder> Publics;
std::unique_ptr<TpiStreamBuilder> Tpi;
std::unique_ptr<TpiStreamBuilder> Ipi;
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
index 86ef1136b41d..29167c966d42 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
@@ -56,7 +56,6 @@ private:
const PDBStringTableHeader *Header = nullptr;
codeview::DebugStringTableSubsectionRef Strings;
FixedStreamArray<support::ulittle32_t> IDs;
- uint32_t ByteSize = 0;
uint32_t NameCount = 0;
};
diff --git a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
index 4570c80c76d7..9ace826bd8f7 100644
--- a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
@@ -25,8 +25,6 @@ struct GSIHashHeader;
class PDBFile;
class PublicsStream {
- struct HeaderInfo;
-
public:
PublicsStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
~PublicsStream();
@@ -65,7 +63,7 @@ private:
FixedStreamArray<support::ulittle32_t> ThunkMap;
FixedStreamArray<SectionOffset> SectionOffsets;
- const HeaderInfo *Header;
+ const PublicsStreamHeader *Header;
const GSIHashHeader *HashHdr;
};
}
diff --git a/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h
new file mode 100644
index 000000000000..5ab57ebef53d
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/PublicsStreamBuilder.h
@@ -0,0 +1,54 @@
+//===- PublicsStreamBuilder.h - PDB Publics Stream Creation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBPUBLICSTREAMBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_PDBPUBLICSTREAMBUILDER_H
+
+#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace msf {
+class MSFBuilder;
+}
+namespace pdb {
+class PublicsStream;
+struct PublicsStreamHeader;
+
+class PublicsStreamBuilder {
+public:
+ explicit PublicsStreamBuilder(msf::MSFBuilder &Msf);
+ ~PublicsStreamBuilder();
+
+ PublicsStreamBuilder(const PublicsStreamBuilder &) = delete;
+ PublicsStreamBuilder &operator=(const PublicsStreamBuilder &) = delete;
+
+ Error finalizeMsfLayout();
+ uint32_t calculateSerializedLength() const;
+
+ Error commit(BinaryStreamWriter &PublicsWriter);
+
+ uint32_t getStreamIndex() const { return StreamIdx; }
+ uint32_t getRecordStreamIdx() const { return RecordStreamIdx; }
+
+private:
+ uint32_t StreamIdx = kInvalidStreamIndex;
+ uint32_t RecordStreamIdx = kInvalidStreamIndex;
+ std::vector<PSHashRecord> HashRecords;
+ msf::MSFBuilder &Msf;
+};
+} // namespace pdb
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 771272d6a47d..a3cdd3f09a44 100644
--- a/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -255,6 +255,19 @@ struct ModuleInfoHeader {
/// char ObjFileName[];
};
+// This is the PSGSIHDR struct defined in
+// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
+struct PublicsStreamHeader {
+ support::ulittle32_t SymHash;
+ support::ulittle32_t AddrMap;
+ support::ulittle32_t NumThunks;
+ support::ulittle32_t SizeOfThunk;
+ support::ulittle16_t ISectThunkTable;
+ char Padding[2];
+ support::ulittle32_t OffThunkTable;
+ support::ulittle32_t NumSections;
+};
+
/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
/// is abstracted here for the purposes of non-Windows platforms that don't have
/// the GUID structure defined.
diff --git a/include/llvm/ExecutionEngine/JITSymbol.h b/include/llvm/ExecutionEngine/JITSymbol.h
index f09e95fddb97..4172f240ba39 100644
--- a/include/llvm/ExecutionEngine/JITSymbol.h
+++ b/include/llvm/ExecutionEngine/JITSymbol.h
@@ -21,6 +21,8 @@
#include <functional>
#include <string>
+#include "llvm/Support/Error.h"
+
namespace llvm {
class GlobalValue;
@@ -41,10 +43,11 @@ public:
enum FlagNames : UnderlyingType {
None = 0,
- Weak = 1U << 0,
- Common = 1U << 1,
- Absolute = 1U << 2,
- Exported = 1U << 3
+ HasError = 1U << 0,
+ Weak = 1U << 1,
+ Common = 1U << 2,
+ Absolute = 1U << 3,
+ Exported = 1U << 4
};
/// @brief Default-construct a JITSymbolFlags instance.
@@ -53,6 +56,11 @@ public:
/// @brief Construct a JITSymbolFlags instance from the given flags.
JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}
+ /// @brief Return true if there was an error retrieving this symbol.
+ bool hasError() const {
+ return (Flags & HasError) == HasError;
+ }
+
/// @brief Returns true if the Weak flag is set.
bool isWeak() const {
return (Flags & Weak) == Weak;
@@ -113,11 +121,17 @@ private:
/// @brief Represents a symbol in the JIT.
class JITSymbol {
public:
- using GetAddressFtor = std::function<JITTargetAddress()>;
+ using GetAddressFtor = std::function<Expected<JITTargetAddress>()>;
+
+ /// @brief Create a 'null' symbol, used to represent a "symbol not found"
+ /// result from a successful (non-erroneous) lookup.
+ JITSymbol(std::nullptr_t)
+ : CachedAddr(0) {}
- /// @brief Create a 'null' symbol that represents failure to find a symbol
- /// definition.
- JITSymbol(std::nullptr_t) {}
+ /// @brief Create a JITSymbol representing an error in the symbol lookup
+ /// process (e.g. a network failure during a remote lookup).
+ JITSymbol(Error Err)
+ : Err(std::move(Err)), Flags(JITSymbolFlags::HasError) {}
/// @brief Create a symbol for a definition with a known address.
JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
@@ -137,18 +151,59 @@ public:
/// user can materialize the definition at any time by calling the getAddress
/// method.
JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
- : GetAddress(std::move(GetAddress)), Flags(Flags) {}
+ : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}
+
+ JITSymbol(const JITSymbol&) = delete;
+ JITSymbol& operator=(const JITSymbol&) = delete;
+
+ JITSymbol(JITSymbol &&Other)
+ : GetAddress(std::move(Other.GetAddress)), Flags(std::move(Other.Flags)) {
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ }
+
+ JITSymbol& operator=(JITSymbol &&Other) {
+ GetAddress = std::move(Other.GetAddress);
+ Flags = std::move(Other.Flags);
+ if (Flags.hasError())
+ Err = std::move(Other.Err);
+ else
+ CachedAddr = std::move(Other.CachedAddr);
+ return *this;
+ }
+
+ ~JITSymbol() {
+ if (Flags.hasError())
+ Err.~Error();
+ else
+ CachedAddr.~JITTargetAddress();
+ }
/// @brief Returns true if the symbol exists, false otherwise.
- explicit operator bool() const { return CachedAddr || GetAddress; }
+ explicit operator bool() const {
+ return !Flags.hasError() && (CachedAddr || GetAddress);
+ }
+
+ /// @brief Move the error field value out of this JITSymbol.
+ Error takeError() {
+ if (Flags.hasError())
+ return std::move(Err);
+ return Error::success();
+ }
/// @brief Get the address of the symbol in the target address space. Returns
/// '0' if the symbol does not exist.
- JITTargetAddress getAddress() {
+ Expected<JITTargetAddress> getAddress() {
+ assert(!Flags.hasError() && "getAddress called on error value");
if (GetAddress) {
- CachedAddr = GetAddress();
- assert(CachedAddr && "Symbol could not be materialized.");
- GetAddress = nullptr;
+ if (auto CachedAddrOrErr = GetAddress()) {
+ GetAddress = nullptr;
+ CachedAddr = *CachedAddrOrErr;
+ assert(CachedAddr && "Symbol could not be materialized.");
+ } else
+ return CachedAddrOrErr.takeError();
}
return CachedAddr;
}
@@ -157,7 +212,10 @@ public:
private:
GetAddressFtor GetAddress;
- JITTargetAddress CachedAddr = 0;
+ union {
+ JITTargetAddress CachedAddr;
+ Error Err;
+ };
JITSymbolFlags Flags;
};
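
A sketch of the new error-aware lookup flow; the function name and the error message are illustrative, not LLVM API.

#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Support/Error.h"

static llvm::Expected<llvm::JITTargetAddress>
resolveOrFail(llvm::JITSymbol &Sym) {
  if (!Sym) {
    if (auto Err = Sym.takeError())
      return std::move(Err);                    // lookup itself failed
    return llvm::make_error<llvm::StringError>( // lookup succeeded, no symbol
        "symbol not found", llvm::inconvertibleErrorCode());
  }
  auto AddrOrErr = Sym.getAddress(); // now Expected<JITTargetAddress>
  if (!AddrOrErr)
    return AddrOrErr.takeError();
  return *AddrOrErr;
}
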
diff --git a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index 8ac1b6bca0a7..c1acca386820 100644
--- a/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -146,7 +146,7 @@ private:
std::unique_ptr<JITSymbolResolver>)>;
struct SourceModuleEntry {
- std::unique_ptr<ResourceOwner<Module>> SourceMod;
+ std::shared_ptr<Module> SourceMod;
std::set<Function*> StubsToClone;
};
@@ -154,7 +154,7 @@ private:
using SourceModuleHandle = typename SourceModulesList::size_type;
SourceModuleHandle
- addSourceModule(std::unique_ptr<ResourceOwner<Module>> M) {
+ addSourceModule(std::shared_ptr<Module> M) {
SourceModuleHandle H = SourceModules.size();
SourceModules.push_back(SourceModuleEntry());
SourceModules.back().SourceMod = std::move(M);
@@ -162,7 +162,7 @@ private:
}
Module& getSourceModule(SourceModuleHandle H) {
- return SourceModules[H].SourceMod->getResource();
+ return *SourceModules[H].SourceMod;
}
std::set<Function*>& getStubsToClone(SourceModuleHandle H) {
@@ -176,19 +176,21 @@ private:
for (auto BLH : BaseLayerHandles)
if (auto Sym = BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return nullptr;
}
- void removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+ Error removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
for (auto &BLH : BaseLayerHandles)
- BaseLayer.removeModule(BLH);
+ if (auto Err = BaseLayer.removeModule(BLH))
+ return Err;
+ return Error::success();
}
- std::unique_ptr<JITSymbolResolver> ExternalSymbolResolver;
- std::unique_ptr<ResourceOwner<RuntimeDyld::MemoryManager>> MemMgr;
+ std::shared_ptr<JITSymbolResolver> ExternalSymbolResolver;
std::unique_ptr<IndirectStubsMgrT> StubsMgr;
StaticGlobalRenamer StaticRenamer;
- ModuleAdderFtor ModuleAdder;
SourceModulesList SourceModules;
std::vector<BaseLayerModuleHandleT> BaseLayerHandles;
};
@@ -196,6 +198,7 @@ private:
using LogicalDylibList = std::list<LogicalDylib>;
public:
+
/// @brief Handle to loaded module.
using ModuleHandleT = typename LogicalDylibList::iterator;
@@ -217,48 +220,41 @@ public:
CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
~CompileOnDemandLayer() {
+ // FIXME: Report errors to the log.
while (!LogicalDylibs.empty())
- removeModule(LogicalDylibs.begin());
+ consumeError(removeModule(LogicalDylibs.begin()));
}
/// @brief Add a module to the compile-on-demand layer.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
LogicalDylibs.push_back(LogicalDylib());
auto &LD = LogicalDylibs.back();
LD.ExternalSymbolResolver = std::move(Resolver);
LD.StubsMgr = CreateIndirectStubsManager();
- auto &MemMgrRef = *MemMgr;
- LD.MemMgr = wrapOwnership<RuntimeDyld::MemoryManager>(std::move(MemMgr));
-
- LD.ModuleAdder =
- [&MemMgrRef](BaseLayerT &B, std::unique_ptr<Module> M,
- std::unique_ptr<JITSymbolResolver> R) {
- return B.addModule(std::move(M), &MemMgrRef, std::move(R));
- };
-
// Process each of the modules in this module set.
- addLogicalModule(LogicalDylibs.back(), std::move(M));
+ if (auto Err = addLogicalModule(LD, std::move(M)))
+ return std::move(Err);
return std::prev(LogicalDylibs.end());
}
/// @brief Add extra modules to an existing logical module.
- void addExtraModule(ModuleHandleT H, std::shared_ptr<Module> M) {
- addLogicalModule(*H, std::move(M));
+ Error addExtraModule(ModuleHandleT H, std::shared_ptr<Module> M) {
+ return addLogicalModule(*H, std::move(M));
}
/// @brief Remove the module represented by the given handle.
///
/// This will remove all modules in the layers below that were derived from
/// the module represented by H.
- void removeModule(ModuleHandleT H) {
- H->removeModulesFromBaseLayer(BaseLayer);
+ Error removeModule(ModuleHandleT H) {
+ auto Err = H->removeModulesFromBaseLayer(BaseLayer);
LogicalDylibs.erase(H);
+ return Err;
}
/// @brief Search for the given named symbol.
@@ -272,6 +268,8 @@ public:
return Sym;
if (auto Sym = findSymbolIn(LDI, Name, ExportedSymbolsOnly))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
}
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
@@ -309,8 +307,9 @@ public:
}
private:
- template <typename ModulePtrT>
- void addLogicalModule(LogicalDylib &LD, ModulePtrT SrcMPtr) {
+
+ Error addLogicalModule(LogicalDylib &LD, std::shared_ptr<Module> SrcMPtr) {
+
// Rename all static functions / globals to $static.X :
// This will unique the names across all modules in the logical dylib,
// simplifying symbol lookup.
@@ -322,7 +321,7 @@ private:
// Create a logical module handle for SrcM within the logical dylib.
Module &SrcM = *SrcMPtr;
- auto LMId = LD.addSourceModule(wrapOwnership<Module>(std::move(SrcMPtr)));
+ auto LMId = LD.addSourceModule(std::move(SrcMPtr));
// Create stub functions.
const DataLayout &DL = SrcM.getDataLayout();
@@ -335,9 +334,12 @@ private:
// Skip weak functions for which we already have definitions.
auto MangledName = mangle(F.getName(), DL);
- if (F.hasWeakLinkage() || F.hasLinkOnceLinkage())
+ if (F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
if (auto Sym = LD.findSymbol(BaseLayer, MangledName, false))
continue;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ }
// Record all functions defined by this module.
if (CloneStubsIntoPartitions)
@@ -350,9 +352,15 @@ private:
StubInits[MangledName] =
std::make_pair(CCInfo.getAddress(),
JITSymbolFlags::fromGlobalValue(F));
- CCInfo.setCompileAction([this, &LD, LMId, &F]() {
- return this->extractAndCompile(LD, LMId, F);
- });
+ CCInfo.setCompileAction([this, &LD, LMId, &F]() -> JITTargetAddress {
+ if (auto FnImplAddrOrErr = this->extractAndCompile(LD, LMId, F))
+ return *FnImplAddrOrErr;
+ else {
+ // FIXME: Report error, return to 'abort' or something similar.
+ consumeError(FnImplAddrOrErr.takeError());
+ return 0;
+ }
+ });
}
auto EC = LD.StubsMgr->createStubs(StubInits);
@@ -367,7 +375,7 @@ private:
// empty globals module.
if (SrcM.global_empty() && SrcM.alias_empty() &&
!SrcM.getModuleFlagsMetadata())
- return;
+ return Error::success();
// Create the GlobalValues module.
auto GVsM = llvm::make_unique<Module>((SrcM.getName() + ".globals").str(),
@@ -393,8 +401,9 @@ private:
// Initializers may refer to functions declared (but not defined) in this
// module. Build a materializer to clone decls on demand.
+ Error MaterializerErrors = Error::success();
auto Materializer = createLambdaMaterializer(
- [&LD, &GVsM](Value *V) -> Value* {
+ [&LD, &GVsM, &MaterializerErrors](Value *V) -> Value* {
if (auto *F = dyn_cast<Function>(V)) {
// Decls in the original module just get cloned.
if (F->isDeclaration())
@@ -405,13 +414,24 @@ private:
// instead.
const DataLayout &DL = GVsM->getDataLayout();
std::string FName = mangle(F->getName(), DL);
- auto StubSym = LD.StubsMgr->findStub(FName, false);
unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
- ConstantInt *StubAddr =
- ConstantInt::get(GVsM->getContext(),
- APInt(PtrBitWidth, StubSym.getAddress()));
+ JITTargetAddress StubAddr = 0;
+
+ // Get the address for the stub. If we encounter an error while
+ // doing so, stash it in the MaterializerErrors variable and use a
+ // null address as a placeholder.
+ if (auto StubSym = LD.StubsMgr->findStub(FName, false)) {
+ if (auto StubAddrOrErr = StubSym.getAddress())
+ StubAddr = *StubAddrOrErr;
+ else
+ MaterializerErrors = joinErrors(std::move(MaterializerErrors),
+ StubAddrOrErr.takeError());
+ }
+
+ ConstantInt *StubAddrCI =
+ ConstantInt::get(GVsM->getContext(), APInt(PtrBitWidth, StubAddr));
Constant *Init = ConstantExpr::getCast(Instruction::IntToPtr,
- StubAddr, F->getType());
+ StubAddrCI, F->getType());
return GlobalAlias::create(F->getFunctionType(),
F->getType()->getAddressSpace(),
F->getLinkage(), F->getName(),
@@ -435,22 +455,31 @@ private:
NewA->setAliasee(cast<Constant>(Init));
}
+ if (MaterializerErrors)
+ return MaterializerErrors;
+
// Build a resolver for the globals module and add it to the base layer.
auto GVsResolver = createLambdaResolver(
- [this, &LD](const std::string &Name) {
+ [this, &LD](const std::string &Name) -> JITSymbol {
if (auto Sym = LD.StubsMgr->findStub(Name, false))
return Sym;
if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[&LD](const std::string &Name) {
return LD.ExternalSymbolResolver->findSymbol(Name);
});
- auto GVsH = LD.ModuleAdder(BaseLayer, std::move(GVsM),
- std::move(GVsResolver));
- LD.BaseLayerHandles.push_back(GVsH);
+ if (auto GVsHOrErr =
+ BaseLayer.addModule(std::move(GVsM), std::move(GVsResolver)))
+ LD.BaseLayerHandles.push_back(*GVsHOrErr);
+ else
+ return GVsHOrErr.takeError();
+
+ return Error::success();
}
static std::string mangle(StringRef Name, const DataLayout &DL) {
@@ -462,7 +491,7 @@ private:
return MangledName;
}
- JITTargetAddress
+ Expected<JITTargetAddress>
extractAndCompile(LogicalDylib &LD,
typename LogicalDylib::SourceModuleHandle LMId,
Function &F) {
@@ -475,34 +504,42 @@ private:
// Grab the name of the function being called here.
std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
- auto Part = Partition(F);
- auto PartH = emitPartition(LD, LMId, Part);
-
JITTargetAddress CalledAddr = 0;
- for (auto *SubF : Part) {
- std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
- auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false);
- assert(FnBodySym && "Couldn't find function body.");
-
- JITTargetAddress FnBodyAddr = FnBodySym.getAddress();
-
- // If this is the function we're calling record the address so we can
- // return it from this function.
- if (SubF == &F)
- CalledAddr = FnBodyAddr;
-
- // Update the function body pointer for the stub.
- if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
- return 0;
- }
+ auto Part = Partition(F);
+ if (auto PartHOrErr = emitPartition(LD, LMId, Part)) {
+ auto &PartH = *PartHOrErr;
+ for (auto *SubF : Part) {
+ std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
+ if (auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false)) {
+ if (auto FnBodyAddrOrErr = FnBodySym.getAddress()) {
+ JITTargetAddress FnBodyAddr = *FnBodyAddrOrErr;
+
+ // If this is the function we're calling, record the address so we can
+ // return it from this function.
+ if (SubF == &F)
+ CalledAddr = FnBodyAddr;
+
+ // Update the function body pointer for the stub.
+ if (auto EC = LD.StubsMgr->updatePointer(FnName, FnBodyAddr))
+ return 0;
+
+ } else
+ return FnBodyAddrOrErr.takeError();
+ } else if (auto Err = FnBodySym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Function not emitted for partition");
+ }
- LD.BaseLayerHandles.push_back(PartH);
+ LD.BaseLayerHandles.push_back(PartH);
+ } else
+ return PartHOrErr.takeError();
return CalledAddr;
}
template <typename PartitionT>
- BaseLayerModuleHandleT
+ Expected<BaseLayerModuleHandleT>
emitPartition(LogicalDylib &LD,
typename LogicalDylib::SourceModuleHandle LMId,
const PartitionT &Part) {
@@ -566,16 +603,18 @@ private:
// Create memory manager and symbol resolver.
auto Resolver = createLambdaResolver(
- [this, &LD](const std::string &Name) {
+ [this, &LD](const std::string &Name) -> JITSymbol {
if (auto Sym = LD.findSymbol(BaseLayer, Name, false))
return Sym;
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
return LD.ExternalSymbolResolver->findSymbolInLogicalDylib(Name);
},
[&LD](const std::string &Name) {
return LD.ExternalSymbolResolver->findSymbol(Name);
});
- return LD.ModuleAdder(BaseLayer, std::move(M), std::move(Resolver));
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
BaseLayerT &BaseLayer;
diff --git a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
index bf8cca406844..d9b45c6a1e29 100644
--- a/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
+++ b/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -17,6 +17,8 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include <algorithm>
#include <cstdint>
#include <string>
@@ -99,19 +101,24 @@ public:
/// @brief Run the recorded constructors/destructors through the given JIT
/// layer.
- bool runViaLayer(JITLayerT &JITLayer) const {
+ Error runViaLayer(JITLayerT &JITLayer) const {
using CtorDtorTy = void (*)();
- bool Error = false;
for (const auto &CtorDtorName : CtorDtorNames)
if (auto CtorDtorSym = JITLayer.findSymbolIn(H, CtorDtorName, false)) {
- CtorDtorTy CtorDtor =
- reinterpret_cast<CtorDtorTy>(
- static_cast<uintptr_t>(CtorDtorSym.getAddress()));
- CtorDtor();
- } else
- Error = true;
- return !Error;
+ if (auto AddrOrErr = CtorDtorSym.getAddress()) {
+ CtorDtorTy CtorDtor =
+ reinterpret_cast<CtorDtorTy>(static_cast<uintptr_t>(*AddrOrErr));
+ CtorDtor();
+ } else
+ return AddrOrErr.takeError();
+ } else {
+ if (auto Err = CtorDtorSym.takeError())
+ return Err;
+ else
+ return make_error<JITSymbolNotFound>(CtorDtorName);
+ }
+ return Error::success();
}
private:
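
A sketch of handling the Error now returned by runViaLayer; the wrapper function is illustrative.

#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

template <typename JITLayerT>
void runRecordedCtors(llvm::orc::CtorDtorRunner<JITLayerT> &Runner,
                      JITLayerT &Layer) {
  if (auto Err = Runner.runViaLayer(Layer)) // previously returned bool
    llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(),
                                "ctor/dtor error: ");
}
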
diff --git a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
index d582e9a33241..ff54ef625ebb 100644
--- a/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -17,9 +17,14 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include <map>
+#include <memory>
#include <string>
namespace llvm {
+
+class Module;
+class JITSymbolResolver;
+
namespace orc {
/// @brief Global mapping layer.
@@ -32,25 +37,22 @@ namespace orc {
template <typename BaseLayerT>
class GlobalMappingLayer {
public:
- /// @brief Handle to a set of added modules.
- using ModuleSetHandleT = typename BaseLayerT::ModuleSetHandleT;
+
+ /// @brief Handle to an added module.
+ using ModuleHandleT = typename BaseLayerT::ModuleHandleT;
/// @brief Construct an GlobalMappingLayer with the given BaseLayer
GlobalMappingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
- /// @brief Add the given module set to the JIT.
+ /// @brief Add the given module to the JIT.
/// @return A handle for the added modules.
- template <typename ModuleSetT, typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ModuleSetHandleT addModuleSet(ModuleSetT Ms,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
- std::move(Resolver));
+ ModuleHandleT addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
/// @brief Remove the module set associated with the handle H.
- void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
+ void removeModule(ModuleHandleT H) { BaseLayer.removeModule(H); }
/// @brief Manually set the address to return for the given symbol.
void setGlobalMapping(const std::string &Name, JITTargetAddress Addr) {
@@ -78,15 +80,15 @@ public:
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
- /// @brief Get the address of the given symbol in the context of the set of
- /// modules represented by the handle H. This call is forwarded to the
+ /// @brief Get the address of the given symbol in the context of the
+ /// module represented by the handle H. This call is forwarded to the
/// base layer's implementation.
- /// @param H The handle for the module set to search in.
+ /// @param H The handle for the module to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
- /// given module set.
- JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ /// given module.
+ JITSymbol findSymbolIn(ModuleHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
}
@@ -94,7 +96,7 @@ public:
/// @brief Immediately emit and finalize the module set represented by the
/// given handle.
/// @param H Handle for module set to emit/finalize.
- void emitAndFinalize(ModuleSetHandleT H) {
+ void emitAndFinalize(ModuleHandleT H) {
BaseLayer.emitAndFinalize(H);
}
diff --git a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
index 99ccd4d221a5..fadd334bed0f 100644
--- a/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -50,18 +50,18 @@ public:
/// along with the given memory manager and symbol resolver.
///
/// @return A handle for the added module.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
using CompileResult = decltype(Compile(*M));
auto Obj = std::make_shared<CompileResult>(Compile(*M));
- return BaseLayer.addObject(std::move(Obj), std::move(MemMgr),
- std::move(Resolver));
+ return BaseLayer.addObject(std::move(Obj), std::move(Resolver));
}
/// @brief Remove the module associated with the handle H.
- void removeModule(ModuleHandleT H) { BaseLayer.removeObject(H); }
+ Error removeModule(ModuleHandleT H) {
+ return BaseLayer.removeObject(H);
+ }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -87,8 +87,8 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- BaseLayer.emitAndFinalize(H);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
}
private:
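
A sketch of the add/remove flow against the Expected/Error-returning layer interface; the helper is illustrative and works with any layer exposing this signature.

#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Error.h"
#include <memory>

template <typename LayerT>
llvm::Error addThenRemove(LayerT &Layer, std::shared_ptr<llvm::Module> M,
                          std::shared_ptr<llvm::JITSymbolResolver> R) {
  auto HandleOrErr = Layer.addModule(std::move(M), std::move(R));
  if (!HandleOrErr)
    return HandleOrErr.takeError(); // compiling/adding the module failed
  return Layer.removeModule(*HandleOrErr);
}
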
diff --git a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
index cf6556a33bbd..476061afda59 100644
--- a/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -42,16 +42,14 @@ public:
/// the layer below, along with the memory manager and symbol resolver.
///
/// @return A handle for the added modules.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addModule(Transform(std::move(M)), std::move(MemMgr),
- std::move(Resolver));
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addModule(Transform(std::move(M)), std::move(Resolver));
}
/// @brief Remove the module associated with the handle H.
- void removeModule(ModuleHandleT H) { BaseLayer.removeModule(H); }
+ Error removeModule(ModuleHandleT H) { return BaseLayer.removeModule(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -77,8 +75,8 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- BaseLayer.emitAndFinalize(H);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
}
/// @brief Access the transform functor directly.
diff --git a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
index 6868640d38e8..228392ae0d4a 100644
--- a/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
+++ b/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -45,7 +45,7 @@ private:
template <typename DylibLookupFtorT,
typename ExternalLookupFtorT>
-std::unique_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
+std::shared_ptr<LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>>
createLambdaResolver(DylibLookupFtorT DylibLookupFtor,
ExternalLookupFtorT ExternalLookupFtor) {
using LR = LambdaResolver<DylibLookupFtorT, ExternalLookupFtorT>;
diff --git a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
index 38769aac12af..6c951fab6185 100644
--- a/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -46,8 +46,9 @@ public:
private:
class EmissionDeferredModule {
public:
- EmissionDeferredModule() = default;
- virtual ~EmissionDeferredModule() = default;
+ EmissionDeferredModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver)
+ : M(std::move(M)), Resolver(std::move(Resolver)) {}
JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
switch (EmitState) {
@@ -59,16 +60,24 @@ private:
std::string PName = Name;
JITSymbolFlags Flags = JITSymbolFlags::fromGlobalValue(*GV);
auto GetAddress =
- [this, ExportedSymbolsOnly, PName, &B]() -> JITTargetAddress {
+ [this, ExportedSymbolsOnly, PName, &B]() -> Expected<JITTargetAddress> {
if (this->EmitState == Emitting)
return 0;
else if (this->EmitState == NotEmitted) {
this->EmitState = Emitting;
- Handle = this->emitToBaseLayer(B);
+ if (auto HandleOrErr = this->emitToBaseLayer(B))
+ Handle = std::move(*HandleOrErr);
+ else
+ return HandleOrErr.takeError();
this->EmitState = Emitted;
}
- auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly);
- return Sym.getAddress();
+ if (auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly))
+ return Sym.getAddress();
+ else if (auto Err = Sym.takeError())
+ return std::move(Err);
+ else
+ llvm_unreachable("Successful symbol lookup should return "
+ "definition address here");
};
return JITSymbol(std::move(GetAddress), Flags);
} else
@@ -101,33 +110,10 @@ private:
BaseLayer.emitAndFinalize(Handle);
}
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- static std::unique_ptr<EmissionDeferredModule>
- create(BaseLayerT &B, std::shared_ptr<Module> M, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver);
-
- protected:
- virtual const GlobalValue* searchGVs(StringRef Name,
- bool ExportedSymbolsOnly) const = 0;
- virtual BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) = 0;
-
private:
- enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
- BaseLayerHandleT Handle;
- };
-
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- class EmissionDeferredModuleImpl : public EmissionDeferredModule {
- public:
- EmissionDeferredModuleImpl(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver)
- : M(std::move(M)), MemMgr(std::move(MemMgr)),
- Resolver(std::move(Resolver)) {}
- protected:
const GlobalValue* searchGVs(StringRef Name,
- bool ExportedSymbolsOnly) const override {
+ bool ExportedSymbolsOnly) const {
// FIXME: We could clean all this up if we had a way to reliably demangle
// names: We could just demangle name and search, rather than
// mangling everything else.
@@ -149,15 +135,13 @@ private:
return buildMangledSymbols(Name, ExportedSymbolsOnly);
}
- BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) override {
+ Expected<BaseLayerHandleT> emitToBaseLayer(BaseLayerT &BaseLayer) {
// We don't need the mangled names set any more: Once we've emitted this
// to the base layer we'll just look for symbols there.
MangledSymbols.reset();
- return BaseLayer.addModule(std::move(M), std::move(MemMgr),
- std::move(Resolver));
+ return BaseLayer.addModule(std::move(M), std::move(Resolver));
}
- private:
// If the mangled name of the given GlobalValue matches the given search
// name (and its visibility conforms to the ExportedSymbolsOnly flag) then
// return the symbol. Otherwise, add the mangled name to the Names map and
@@ -207,9 +191,10 @@ private:
return nullptr;
}
+ enum { NotEmitted, Emitting, Emitted } EmitState = NotEmitted;
+ BaseLayerHandleT Handle;
std::shared_ptr<Module> M;
- MemoryManagerPtrT MemMgr;
- SymbolResolverPtrT Resolver;
+ std::shared_ptr<JITSymbolResolver> Resolver;
mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
};
@@ -219,6 +204,7 @@ private:
ModuleListT ModuleList;
public:
+
/// @brief Handle to a loaded module.
using ModuleHandleT = typename ModuleListT::iterator;
@@ -226,24 +212,23 @@ public:
LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
/// @brief Add the given module to the lazy emitting layer.
- template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
- ModuleHandleT addModule(std::shared_ptr<Module> M,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
+ Expected<ModuleHandleT>
+ addModule(std::shared_ptr<Module> M,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
return ModuleList.insert(
ModuleList.end(),
- EmissionDeferredModule::create(BaseLayer, std::move(M),
- std::move(MemMgr),
- std::move(Resolver)));
+ llvm::make_unique<EmissionDeferredModule>(std::move(M),
+ std::move(Resolver)));
}
/// @brief Remove the module represented by the given handle.
///
/// This method will free the memory associated with the given module, both
/// in this layer, and the base layer.
- void removeModule(ModuleHandleT H) {
+ Error removeModule(ModuleHandleT H) {
(*H)->removeModuleFromBaseLayer(BaseLayer);
ModuleList.erase(H);
+ return Error::success();
}
/// @brief Search for the given named symbol.
@@ -276,22 +261,11 @@ public:
/// @brief Immediately emit and finalize the module represented by the given
/// handle.
/// @param H Handle for module to emit/finalize.
- void emitAndFinalize(ModuleHandleT H) {
- (*H)->emitAndFinalize(BaseLayer);
+ Error emitAndFinalize(ModuleHandleT H) {
+ return (*H)->emitAndFinalize(BaseLayer);
}
};
-template <typename BaseLayerT>
-template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
-std::unique_ptr<typename LazyEmittingLayer<BaseLayerT>::EmissionDeferredModule>
-LazyEmittingLayer<BaseLayerT>::EmissionDeferredModule::create(
- BaseLayerT &B, std::shared_ptr<Module> M, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- using EDS = EmissionDeferredModuleImpl<MemoryManagerPtrT, SymbolResolverPtrT>;
- return llvm::make_unique<EDS>(std::move(M), std::move(MemMgr),
- std::move(Resolver));
-}
-
} // end namespace orc
} // end namespace llvm
diff --git a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
index c41c1233c0d9..cb47e7520b1a 100644
--- a/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -16,6 +16,7 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include <algorithm>
+#include <memory>
#include <string>
namespace llvm {
@@ -42,16 +43,14 @@ public:
/// memory manager and symbol resolver.
///
/// @return A handle for the added objects.
- template <typename ObjPtrT, typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ObjHandleT addObject(ObjPtrT Obj, MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
- return BaseLayer.addObject(Transform(std::move(Obj)), std::move(MemMgr),
- std::move(Resolver));
+ template <typename ObjectPtr>
+ Expected<ObjHandleT> addObject(ObjectPtr Obj,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
+ return BaseLayer.addObject(Transform(std::move(Obj)), std::move(Resolver));
}
/// @brief Remove the object set associated with the handle H.
- void removeObject(ObjHandleT H) { BaseLayer.removeObject(H); }
+ Error removeObject(ObjHandleT H) { return BaseLayer.removeObject(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
@@ -77,7 +76,9 @@ public:
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
- void emitAndFinalize(ObjHandleT H) { BaseLayer.emitAndFinalize(H); }
+ Error emitAndFinalize(ObjHandleT H) {
+ return BaseLayer.emitAndFinalize(H);
+ }
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjHandleT H, const void *LocalAddress,
diff --git a/include/llvm/ExecutionEngine/Orc/OrcError.h b/include/llvm/ExecutionEngine/Orc/OrcError.h
index cbb40fad0223..e6374b70967a 100644
--- a/include/llvm/ExecutionEngine/Orc/OrcError.h
+++ b/include/llvm/ExecutionEngine/Orc/OrcError.h
@@ -22,7 +22,8 @@ namespace orc {
enum class OrcErrorCode : int {
// RPC Errors
- RemoteAllocatorDoesNotExist = 1,
+ JITSymbolNotFound = 1,
+ RemoteAllocatorDoesNotExist,
RemoteAllocatorIdAlreadyInUse,
RemoteMProtectAddrUnrecognized,
RemoteIndirectStubsOwnerDoesNotExist,
@@ -37,6 +38,18 @@ enum class OrcErrorCode : int {
std::error_code orcError(OrcErrorCode ErrCode);
+class JITSymbolNotFound : public ErrorInfo<JITSymbolNotFound> {
+public:
+ static char ID;
+
+ JITSymbolNotFound(std::string SymbolName);
+ std::error_code convertToErrorCode() const override;
+ void log(raw_ostream &OS) const override;
+ const std::string &getSymbolName() const;
+private:
+ std::string SymbolName;
+};
+
} // End namespace orc.
} // End namespace llvm.
diff --git a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 66ad36be01c8..e1016ef95f0c 100644
--- a/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -228,13 +228,20 @@ private:
public:
+ /// @brief Functor for creating memory managers.
+ using MemoryManagerGetter =
+ std::function<std::shared_ptr<RuntimeDyld::MemoryManager>()>;
+
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
/// and NotifyFinalized functors.
RTDyldObjectLinkingLayer(
+ MemoryManagerGetter GetMemMgr,
NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
- : NotifyLoaded(std::move(NotifyLoaded)),
- NotifyFinalized(std::move(NotifyFinalized)) {}
+ : GetMemMgr(GetMemMgr),
+ NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)),
+ ProcessAllSections(false) {}
/// @brief Set the 'ProcessAllSections' flag.
///
@@ -251,12 +258,8 @@ public:
///
/// @return A handle that can be used to refer to the loaded objects (for
/// symbol searching, finalization, freeing memory, etc.).
- template <typename MemoryManagerPtrT,
- typename SymbolResolverPtrT>
- ObjHandleT addObject(ObjectPtr Obj,
- MemoryManagerPtrT MemMgr,
- SymbolResolverPtrT Resolver) {
-
+ Expected<ObjHandleT> addObject(ObjectPtr Obj,
+ std::shared_ptr<JITSymbolResolver> Resolver) {
auto Finalizer = [&](ObjHandleT H, RuntimeDyld &RTDyld,
const ObjectPtr &ObjToLoad,
std::function<void()> LOSHandleLoad) {
@@ -275,8 +278,9 @@ public:
};
auto LO =
- createLinkedObject(std::move(Obj), std::move(MemMgr), std::move(Resolver),
- std::move(Finalizer), ProcessAllSections);
+ createLinkedObject(std::move(Obj), GetMemMgr(),
+ std::move(Resolver), std::move(Finalizer),
+ ProcessAllSections);
// LOS is an owning-ptr. Keep a non-owning one so that we can set the handle
// below.
auto *LOPtr = LO.get();
@@ -295,9 +299,10 @@ public:
/// indirectly) will result in undefined behavior. If dependence tracking is
/// required to detect or resolve such issues it should be added at a higher
/// layer.
- void removeObject(ObjHandleT H) {
+ Error removeObject(ObjHandleT H) {
// How do we invalidate the symbols in H?
LinkedObjList.erase(H);
+ return Error::success();
}
/// @brief Search for the given named symbol.
@@ -334,13 +339,15 @@ public:
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
- void emitAndFinalize(ObjHandleT H) {
+ Error emitAndFinalize(ObjHandleT H) {
(*H)->finalize();
+ return Error::success();
}
private:
LinkedObjectListT LinkedObjList;
+ MemoryManagerGetter GetMemMgr;
NotifyLoadedFtor NotifyLoaded;
NotifyFinalizedFtor NotifyFinalized;
bool ProcessAllSections = false;
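
A sketch of constructing the layer with the new MemoryManagerGetter; SectionMemoryManager is a stock LLVM memory manager, and the function name is illustrative.

#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include <memory>

static void buildObjectLayerSketch() {
  llvm::orc::RTDyldObjectLinkingLayer ObjLayer(
      []() { return std::make_shared<llvm::SectionMemoryManager>(); });
  (void)ObjLayer; // objects are now added via addObject(Obj, Resolver) only
}
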
diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h
index 1925489f7952..56aa04ce694a 100644
--- a/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -88,21 +88,6 @@ public:
ObjSectionToIDMap ObjSecToIDMap;
};
- template <typename Derived> struct LoadedObjectInfoHelper : LoadedObjectInfo {
- protected:
- LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
- LoadedObjectInfoHelper() = default;
-
- public:
- LoadedObjectInfoHelper(RuntimeDyldImpl &RTDyld,
- LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
- : LoadedObjectInfo(RTDyld, std::move(ObjSecToIDMap)) {}
-
- std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
- return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
- }
- };
-
/// \brief Memory Management.
class MemoryManager {
friend class RuntimeDyld;
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index 8b3a90fa065b..2e72c41ccee3 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -680,11 +680,6 @@ class ConstantDataArray final : public ConstantDataSequential {
explicit ConstantDataArray(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
- /// Allocate space for exactly zero operands.
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
-
public:
ConstantDataArray(const ConstantDataArray &) = delete;
@@ -739,11 +734,6 @@ class ConstantDataVector final : public ConstantDataSequential {
explicit ConstantDataVector(Type *ty, const char *Data)
: ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
- // allocate space for exactly zero operands.
- void *operator new(size_t s) {
- return User::operator new(s, 0);
- }
-
public:
ConstantDataVector(const ConstantDataVector &) = delete;
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index ec33f82f7022..5344a93efb33 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -1062,7 +1062,7 @@ public:
Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *RC = dyn_cast<Constant>(RHS)) {
- if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue())
+ if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
return LHS; // LHS & -1 -> LHS
if (Constant *LC = dyn_cast<Constant>(LHS))
return Insert(Folder.CreateAnd(LC, RC), Name);
@@ -1203,22 +1203,22 @@ public:
return SI;
}
FenceInst *CreateFence(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
const Twine &Name = "") {
- return Insert(new FenceInst(Context, Ordering, SynchScope), Name);
+ return Insert(new FenceInst(Context, Ordering, SSID), Name);
}
AtomicCmpXchgInst *
CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
- FailureOrdering, SynchScope));
+ FailureOrdering, SSID));
}
AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
- return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope));
+ SyncScope::ID SSID = SyncScope::System) {
+ return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
}
Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name = "") {
@@ -1517,11 +1517,9 @@ public:
const Twine &Name = "") {
if (V->getType() == DestTy)
return V;
- if (V->getType()->getScalarType()->isPointerTy() &&
- DestTy->getScalarType()->isIntegerTy())
+ if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
return CreatePtrToInt(V, DestTy, Name);
- if (V->getType()->getScalarType()->isIntegerTy() &&
- DestTy->getScalarType()->isPointerTy())
+ if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
return CreateIntToPtr(V, DestTy, Name);
return CreateBitCast(V, DestTy, Name);
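The IRBuilder changes above replace SynchronizationScope with SyncScope::ID; the default SyncScope::System preserves the old CrossThread behaviour. A hedged sketch of updated call sites (the value names are placeholders):

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Illustrative only: Ptr, Val, Cmp and New are assumed to be values of the
// appropriate types for the atomic operations below.
void emitAtomics(IRBuilder<> &B, Value *Ptr, Value *Val, Value *Cmp, Value *New) {
  // Previously: B.CreateFence(AtomicOrdering::SequentiallyConsistent, SingleThread);
  B.CreateFence(AtomicOrdering::SequentiallyConsistent, SyncScope::SingleThread);

  // Omitting the scope uses the default SyncScope::System (the old CrossThread).
  B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val, AtomicOrdering::Monotonic);

  B.CreateAtomicCmpXchg(Ptr, Cmp, New, AtomicOrdering::SequentiallyConsistent,
                        AtomicOrdering::Monotonic, SyncScope::System);
}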
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index dc5f37450b48..60ae98869e55 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -52,11 +52,6 @@ class ConstantInt;
class DataLayout;
class LLVMContext;
-enum SynchronizationScope {
- SingleThread = 0,
- CrossThread = 1
-};
-
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//
@@ -195,17 +190,16 @@ public:
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
- AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread,
+ AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr)
: LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
- NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {}
+ NameStr, isVolatile, Align, Order, SSID, InsertBefore) {}
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
@@ -235,34 +229,34 @@ public:
void setAlignment(unsigned Align);
- /// Returns the ordering effect of this fence.
+ /// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
- /// Set the ordering constraint on this load. May not be Release or
- /// AcquireRelease.
+ /// Sets the ordering constraint of this load instruction. May not be Release
+ /// or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
((unsigned)Ordering << 7));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ /// Returns the synchronization scope ID of this load instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this load is ordered with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
- (xthread << 6));
+ /// Sets the synchronization scope ID of this load instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
+ /// Sets the ordering constraint and the synchronization scope ID of this load
+ /// instruction.
void setAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
@@ -297,6 +291,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this load instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
@@ -325,11 +324,10 @@ public:
unsigned Align, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@@ -356,34 +354,34 @@ public:
void setAlignment(unsigned Align);
- /// Returns the ordering effect of this store.
+ /// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}
- /// Set the ordering constraint on this store. May not be Acquire or
- /// AcquireRelease.
+ /// Sets the ordering constraint of this store instruction. May not be
+ /// Acquire or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
((unsigned)Ordering << 7));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ /// Returns the synchronization scope ID of this store instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this store instruction is ordered with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
- (xthread << 6));
+ /// Sets the synchronization scope ID of this store instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
+ /// Sets the ordering constraint and the synchronization scope ID of this
+ /// store instruction.
void setAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread) {
+ SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
@@ -421,6 +419,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this store instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
template <>
@@ -435,7 +438,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
- void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
@@ -447,10 +450,9 @@ public:
// Ordering may only be Acquire, Release, AcquireRelease, or
// SequentiallyConsistent.
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope = CrossThread,
+ SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
- FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly zero operands
@@ -458,28 +460,26 @@ public:
return User::operator new(s, 0);
}
- /// Returns the ordering effect of this fence.
+ /// Returns the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const {
return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
}
- /// Set the ordering constraint on this fence. May only be Acquire, Release,
- /// AcquireRelease, or SequentiallyConsistent.
+ /// Sets the ordering constraint of this fence instruction. May only be
+ /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
((unsigned)Ordering << 1));
}
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope(getSubclassDataFromInstruction() & 1);
+ /// Returns the synchronization scope ID of this fence instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Specify whether this fence orders other operations with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope xthread) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- xthread);
+ /// Sets the synchronization scope ID of this fence instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -496,6 +496,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this fence instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
@@ -509,7 +514,7 @@ private:
class AtomicCmpXchgInst : public Instruction {
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
@@ -521,13 +526,11 @@ public:
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
- Instruction *InsertBefore = nullptr);
+ SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
- BasicBlock *InsertAtEnd);
+ SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly three operands
void *operator new(size_t s) {
@@ -561,7 +564,12 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// Set the ordering constraint on this cmpxchg.
+ /// Returns the success ordering constraint of this cmpxchg instruction.
+ AtomicOrdering getSuccessOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Sets the success ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
@@ -569,6 +577,12 @@ public:
((unsigned)Ordering << 2));
}
+ /// Returns the failure ordering constraint of this cmpxchg instruction.
+ AtomicOrdering getFailureOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ }
+
+ /// Sets the failure ordering constraint of this cmpxchg instruction.
void setFailureOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
@@ -576,28 +590,14 @@ public:
((unsigned)Ordering << 5));
}
- /// Specify whether this cmpxchg is atomic and orders other operations with
- /// respect to all concurrently executing threads, or only with respect to
- /// signal handlers executing in the same thread.
- void setSynchScope(SynchronizationScope SynchScope) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
- (SynchScope << 1));
- }
-
- /// Returns the ordering constraint on this cmpxchg.
- AtomicOrdering getSuccessOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
- }
-
- /// Returns the ordering constraint on this cmpxchg.
- AtomicOrdering getFailureOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ /// Returns the synchronization scope ID of this cmpxchg instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Returns whether this cmpxchg is atomic between threads or only within a
- /// single thread.
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ /// Sets the synchronization scope ID of this cmpxchg instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
@@ -652,6 +652,11 @@ private:
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this cmpxchg instruction. Not quite
+ /// enough room in SubClassData for everything, so synchronization scope ID
+ /// gets its own field.
+ SyncScope::ID SSID;
};
template <>
@@ -711,10 +716,10 @@ public:
};
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering Ordering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
@@ -748,7 +753,12 @@ public:
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- /// Set the ordering constraint on this RMW.
+ /// Returns the ordering constraint of this rmw instruction.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Sets the ordering constraint of this rmw instruction.
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
@@ -756,23 +766,14 @@ public:
((unsigned)Ordering << 2));
}
- /// Specify whether this RMW orders other operations with respect to all
- /// concurrently executing threads, or only with respect to signal handlers
- /// executing in the same thread.
- void setSynchScope(SynchronizationScope SynchScope) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
- (SynchScope << 1));
+ /// Returns the synchronization scope ID of this rmw instruction.
+ SyncScope::ID getSyncScopeID() const {
+ return SSID;
}
- /// Returns the ordering constraint on this RMW.
- AtomicOrdering getOrdering() const {
- return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
- }
-
- /// Returns whether this RMW is atomic between threads or only within a
- /// single thread.
- SynchronizationScope getSynchScope() const {
- return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ /// Sets the synchronization scope ID of this rmw instruction.
+ void setSyncScopeID(SyncScope::ID SSID) {
+ this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
@@ -797,13 +798,18 @@ public:
private:
void Init(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ AtomicOrdering Ordering, SyncScope::ID SSID);
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ /// The synchronization scope ID of this rmw instruction. Not quite enough
+ /// room in SubClassData for everything, so synchronization scope ID gets its
+ /// own field.
+ SyncScope::ID SSID;
};
template <>
@@ -1101,8 +1107,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
void AssertOK() {
- assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
- getPredicate() <= CmpInst::LAST_ICMP_PREDICATE &&
+ assert(isIntPredicate() &&
"Invalid ICmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!");
@@ -1244,8 +1249,7 @@ public:
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
void AssertOK() {
- assert(getPredicate() <= FCmpInst::LAST_FCMP_PREDICATE &&
- "Invalid FCmp predicate value");
+ assert(isFPPredicate() && "Invalid FCmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!");
// Check that the operands are the right type
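The SynchronizationScope-to-SyncScope::ID migration above renames the accessors on the atomic instructions, so passes that inspect them need a mechanical update. A small sketch, assuming LI points at an atomic load:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Old: LI->getSynchScope() == CrossThread
bool isSystemScopeAcquire(const LoadInst *LI) {
  return LI->getSyncScopeID() == SyncScope::System &&
         LI->getOrdering() == AtomicOrdering::Acquire;
}

// Old: LI->setAtomic(AtomicOrdering::Monotonic, SingleThread);
void makeSingleThreadMonotonic(LoadInst *LI) {
  LI->setAtomic(AtomicOrdering::Monotonic, SyncScope::SingleThread);
}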
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 944af57a7800..f55d17ec72c8 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -296,6 +296,175 @@ namespace llvm {
}
};
+ class ElementUnorderedAtomicMemMoveInst : public IntrinsicInst {
+ private:
+ enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
+
+ public:
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+ /// Return the arguments to the instruction.
+ Value *getRawSource() const {
+ return const_cast<Value *>(getArgOperand(ARG_SOURCE));
+ }
+ const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+ Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ bool isVolatile() const { return false; }
+
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ /// This is just like getRawSource, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ unsigned getSourceAddressSpace() const {
+ return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+ }
+
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ void setSource(Value *Ptr) {
+ assert(getRawSource()->getType() == Ptr->getType() &&
+ "setSource called with pointer of wrong type!");
+ setArgOperand(ARG_SOURCE, Ptr);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
+
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
+ }
+
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+  /// This class represents the atomic memset intrinsic.
+  /// TODO: Integrate this class into the MemIntrinsic hierarchy; for now it
+  /// is a copy of the relevant methods from that hierarchy.
+ class ElementUnorderedAtomicMemSetInst : public IntrinsicInst {
+ private:
+ enum { ARG_DEST = 0, ARG_VALUE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
+
+ public:
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(ARG_VALUE)); }
+ const Use &getValueUse() const { return getArgOperandUse(ARG_VALUE); }
+ Use &getValueUse() { return getArgOperandUse(ARG_VALUE); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ bool isVolatile() const { return false; }
+
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ void setValue(Value *Val) {
+ assert(getValue()->getType() == Val->getType() &&
+ "setValue called with value of wrong type!");
+ setArgOperand(ARG_VALUE, Val);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
+
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
+ }
+
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
/// This is the common base class for memset/memcpy/memmove.
class MemIntrinsic : public IntrinsicInst {
public:
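The new wrapper classes are recognised through the usual isa/dyn_cast machinery. A hedged example of how a transform might query them (the helper function is illustrative, not from the patch):

#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Returns the element size of an element-wise unordered-atomic memmove or
// memset, or 0 if the instruction is neither.
unsigned elementSizeOrZero(const Instruction *I) {
  if (const auto *MM = dyn_cast<ElementUnorderedAtomicMemMoveInst>(I))
    return MM->getElementSizeInBytes();
  if (const auto *MS = dyn_cast<ElementUnorderedAtomicMemSetInst>(I))
    return MS->getElementSizeInBytes();
  return 0;
}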
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 45936a6e9b66..14c88e519435 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -873,6 +873,22 @@ def int_memcpy_element_unordered_atomic
ReadOnly<1>
]>;
+// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
+def int_memmove_element_unordered_atomic
+ : Intrinsic<[],
+ [
+ llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
+ ],
+ [
+ IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+ ReadOnly<1>
+ ]>;
+
+// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
+def int_memset_element_unordered_atomic
+ : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
+ [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0> ]>;
+
//===------------------------ Reduction Intrinsics ------------------------===//
//
def int_experimental_vector_reduce_fadd : Intrinsic<[llvm_anyfloat_ty],
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index b27abad618c9..4cb77701f762 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -42,6 +42,24 @@ class Output;
} // end namespace yaml
+namespace SyncScope {
+
+typedef uint8_t ID;
+
+/// Known synchronization scope IDs, which always have the same value. All
+/// synchronization scope IDs that LLVM has special knowledge of are listed
+/// here. Additionally, this scheme allows LLVM to efficiently check for a
+/// specific synchronization scope ID without comparing strings.
+enum {
+ /// Synchronized with respect to signal handlers executing in the same thread.
+ SingleThread = 0,
+
+ /// Synchronized with respect to all concurrently executing threads.
+ System = 1
+};
+
+} // end namespace SyncScope
+
/// This is an important class for using LLVM in a threaded context. It
/// (opaquely) owns and manages the core "global" data of LLVM's core
/// infrastructure, including the type and constant uniquing tables.
@@ -111,6 +129,16 @@ public:
/// tag registered with an LLVMContext has a unique ID.
uint32_t getOperandBundleTagID(StringRef Tag) const;
+  /// getOrInsertSyncScopeID - Maps a synchronization scope name to a
+  /// synchronization scope ID. Every synchronization scope registered with an
+  /// LLVMContext has a unique ID, except for the pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+ /// getSyncScopeNames - Populates client supplied SmallVector with
+ /// synchronization scope names registered with LLVMContext. Synchronization
+ /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Define the GC for a function
void setGC(const Function &Fn, std::string GCName);
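Non-predefined scopes are obtained by name via getOrInsertSyncScopeID(). A sketch of registering and using one ("agent" is a hypothetical scope name chosen purely for illustration):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

void emitScopedFence(IRBuilder<> &B, LLVMContext &Ctx) {
  // Look up (or register) a target-specific synchronization scope by name.
  SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
  B.CreateFence(AtomicOrdering::Release, AgentSSID);

  // All registered scope names, ordered by increasing scope ID.
  SmallVector<StringRef, 8> Names;
  Ctx.getSyncScopeNames(Names);
}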
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index d47d82a57bff..196e32e3615c 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -249,7 +249,7 @@ public:
/// when other randomness consuming passes are added or removed. In
/// addition, the random stream will be reproducible across LLVM
/// versions when the pass does not change.
- RandomNumberGenerator *createRNG(const Pass* P) const;
+ std::unique_ptr<RandomNumberGenerator> createRNG(const Pass* P) const;
/// @}
/// @name Module Level Mutators
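Because createRNG() now returns a std::unique_ptr, the caller owns the generator directly instead of managing a raw pointer. A small sketch (assuming the generator is still invoked through its call operator):

#include <memory>
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/RandomNumberGenerator.h"

using namespace llvm;

void useModuleRNG(const Module &M, const Pass *P) {
  std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG(P);
  auto Bits = (*RNG)(); // assumption: the call operator is unchanged
  (void)Bits;
}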
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index b43d58865862..4aa8a0199ab1 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -47,7 +47,13 @@ template <typename T> struct MappingTraits;
/// \brief Class to accumulate and hold information about a callee.
struct CalleeInfo {
- enum class HotnessType : uint8_t { Unknown = 0, Cold = 1, None = 2, Hot = 3 };
+ enum class HotnessType : uint8_t {
+ Unknown = 0,
+ Cold = 1,
+ None = 2,
+ Hot = 3,
+ Critical = 4
+ };
HotnessType Hotness = HotnessType::Unknown;
CalleeInfo() = default;
@@ -516,7 +522,7 @@ using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>;
/// Map of global value GUID to its summary, used to identify values defined in
/// a particular module, and provide efficient access to their summary.
-using GVSummaryMapTy = std::map<GlobalValue::GUID, GlobalValueSummary *>;
+using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
/// Class to hold module path string table and global value map,
/// and encapsulate methods for operating on them.
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index d03b7b65f81e..393175675034 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -162,6 +162,14 @@ public:
return PA;
}
+ /// \brief Construct a preserved analyses object with a single preserved set.
+ template <typename AnalysisSetT>
+ static PreservedAnalyses allInSet() {
+ PreservedAnalyses PA;
+ PA.preserveSet<AnalysisSetT>();
+ return PA;
+ }
+
/// Mark an analysis as preserved.
template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); }
@@ -1062,10 +1070,27 @@ public:
const AnalysisManagerT &getManager() const { return *AM; }
- /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ /// When invalidation occurs, remove any registered invalidation events.
bool invalidate(
- IRUnitT &, const PreservedAnalyses &,
- typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &) {
+ IRUnitT &IRUnit, const PreservedAnalyses &PA,
+ typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv) {
+ // Loop over the set of registered outer invalidation mappings and if any
+ // of them map to an analysis that is now invalid, clear it out.
+ SmallVector<AnalysisKey *, 4> DeadKeys;
+ for (auto &KeyValuePair : OuterAnalysisInvalidationMap) {
+ AnalysisKey *OuterID = KeyValuePair.first;
+ auto &InnerIDs = KeyValuePair.second;
+ InnerIDs.erase(llvm::remove_if(InnerIDs, [&](AnalysisKey *InnerID) {
+ return Inv.invalidate(InnerID, IRUnit, PA); }),
+ InnerIDs.end());
+ if (InnerIDs.empty())
+ DeadKeys.push_back(OuterID);
+ }
+
+ for (auto OuterID : DeadKeys)
+ OuterAnalysisInvalidationMap.erase(OuterID);
+
+ // The proxy itself remains valid regardless of anything else.
return false;
}
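PreservedAnalyses::allInSet<> gives passes a one-line way to report that an entire analysis set survives. A hedged sketch (the pass name and body are made up; only the return value illustrates the new helper):

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

struct HypotheticalPeepholePass : PassInfoMixin<HypotheticalPeepholePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    bool Changed = false;
    // ... rewrite instructions without touching the CFG ...
    if (!Changed)
      return PreservedAnalyses::all();
    // Shorthand for: PreservedAnalyses PA; PA.preserveSet<CFGAnalyses>(); return PA;
    return PreservedAnalyses::allInSet<CFGAnalyses>();
  }
};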
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index 5b69e7855cc7..acb895211644 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -158,12 +158,18 @@ struct match_neg_zero {
/// zero
inline match_neg_zero m_NegZero() { return match_neg_zero(); }
+struct match_any_zero {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isZeroValue();
+ return false;
+ }
+};
+
/// \brief - Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero and positive zero
-inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() {
- return m_CombineOr(m_Zero(), m_NegZero());
-}
+inline match_any_zero m_AnyZero() { return match_any_zero(); }
struct match_nan {
template <typename ITy> bool match(ITy *V) {
@@ -176,6 +182,39 @@ struct match_nan {
/// Match an arbitrary NaN constant. This includes quiet and signalling nans.
inline match_nan m_NaN() { return match_nan(); }
+struct match_one {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isOneValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer 1 or a vector with all elements equal to 1.
+inline match_one m_One() { return match_one(); }
+
+struct match_all_ones {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isAllOnesValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer or vector with all bits set to true.
+inline match_all_ones m_AllOnes() { return match_all_ones(); }
+
+struct match_sign_mask {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isMinSignedValue();
+ return false;
+ }
+};
+
+/// \brief Match an integer or vector with only the sign bit(s) set.
+inline match_sign_mask m_SignMask() { return match_sign_mask(); }
+
struct apint_match {
const APInt *&Res;
@@ -259,34 +298,6 @@ template <typename Predicate> struct api_pred_ty : public Predicate {
}
};
-struct is_one {
- bool isValue(const APInt &C) { return C.isOneValue(); }
-};
-
-/// \brief Match an integer 1 or a vector with all elements equal to 1.
-inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
-inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }
-
-struct is_all_ones {
- bool isValue(const APInt &C) { return C.isAllOnesValue(); }
-};
-
-/// \brief Match an integer or vector with all bits set to true.
-inline cst_pred_ty<is_all_ones> m_AllOnes() {
- return cst_pred_ty<is_all_ones>();
-}
-inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
-
-struct is_sign_mask {
- bool isValue(const APInt &C) { return C.isSignMask(); }
-};
-
-/// \brief Match an integer or vector with only the sign bit(s) set.
-inline cst_pred_ty<is_sign_mask> m_SignMask() {
- return cst_pred_ty<is_sign_mask>();
-}
-inline api_pred_ty<is_sign_mask> m_SignMask(const APInt *&V) { return V; }
-
struct is_power2 {
bool isValue(const APInt &C) { return C.isPowerOf2(); }
};
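The matchers above are used exactly as before; only their implementation moved from splat-only APInt predicates to Constant predicates, and the APInt-capturing overloads (m_One(const APInt*&) and friends) are gone. A sketch of both situations:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Unchanged usage: the predicate-style matchers still compose as before.
bool isSelectOfZeroAndAllOnes(Value *V) {
  Value *Cond;
  return match(V, m_Select(m_Value(Cond), m_AnyZero(), m_AllOnes()));
}

// Code that previously captured the APInt through m_SignMask(C) now uses
// m_APInt plus an explicit check instead.
bool isXorWithSignMask(Value *V, Value *&X) {
  const APInt *C;
  return match(V, m_Xor(m_Value(X), m_APInt(C))) && C->isSignMask();
}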
diff --git a/include/llvm/IR/SafepointIRVerifier.h b/include/llvm/IR/SafepointIRVerifier.h
new file mode 100644
index 000000000000..092050d1d207
--- /dev/null
+++ b/include/llvm/IR/SafepointIRVerifier.h
@@ -0,0 +1,35 @@
+//===- SafepointIRVerifier.h - Checks for GC relocation problems *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a verifier which is useful for enforcing the relocation
+// properties required by a relocating GC. Specifically, it looks for uses of
+// the unrelocated value of pointer SSA values after a possible safepoint. It
+// attempts to report no false negatives, but may end up reporting false
+// positives in rare cases (see the note at the top of the corresponding cpp
+// file).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SAFEPOINT_IR_VERIFIER
+#define LLVM_IR_SAFEPOINT_IR_VERIFIER
+
+namespace llvm {
+
+class Function;
+class FunctionPass;
+
+/// Run the safepoint verifier over a single function. Crashes on failure.
+void verifySafepointIR(Function &F);
+
+/// Create an instance of the safepoint verifier pass which can be added to
+/// a pass pipeline to check for relocation bugs.
+FunctionPass *createSafepointIRVerifierPass();
+}
+
+#endif // LLVM_IR_SAFEPOINT_IR_VERIFIER
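The verifier can be run ad hoc on a function or scheduled as a legacy pass. A brief sketch (pipeline setup elided):

#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/SafepointIRVerifier.h"

using namespace llvm;

void checkRelocations(Function &F, legacy::FunctionPassManager &FPM) {
  verifySafepointIR(F);                     // ad-hoc check; reports on failure
  FPM.add(createSafepointIRVerifierPass()); // or run it as part of a pipeline
}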
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index b37b59288e3f..ef7801266777 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -202,6 +202,12 @@ public:
/// Return true if this is an integer type or a vector of integer types.
bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
+ /// Return true if this is an integer type or a vector of integer types of
+ /// the given width.
+ bool isIntOrIntVectorTy(unsigned BitWidth) const {
+ return getScalarType()->isIntegerTy(BitWidth);
+ }
+
/// True if this is an instance of FunctionType.
bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
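The width-taking overload folds the common getScalarType()->isIntegerTy(N) pattern into a single call. For example:

#include "llvm/IR/Type.h"

using namespace llvm;

bool isBoolOrBoolVector(Type *Ty) {
  // Old: return Ty->getScalarType()->isIntegerTy(1);
  return Ty->isIntOrIntVectorTy(1);
}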
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index aab14070dbda..39ac4649b70d 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -303,6 +303,7 @@ void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
void initializePromoteLegacyPassPass(PassRegistry&);
void initializePruneEHPass(PassRegistry&);
void initializeRABasicPass(PassRegistry&);
+void initializeRAFastPass(PassRegistry&);
void initializeRAGreedyPass(PassRegistry&);
void initializeReassociateLegacyPassPass(PassRegistry&);
void initializeRegBankSelectPass(PassRegistry&);
@@ -318,6 +319,7 @@ void initializeResetMachineFunctionPass(PassRegistry&);
void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeRewriteStatepointsForGCPass(PassRegistry&);
void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);
+void initializeSafepointIRVerifierPass(PassRegistry&);
void initializeSCCPLegacyPassPass(PassRegistry&);
void initializeSCEVAAWrapperPassPass(PassRegistry&);
void initializeSLPVectorizerPass(PassRegistry&);
diff --git a/include/llvm/MC/MCAsmBackend.h b/include/llvm/MC/MCAsmBackend.h
index c9c43a22da5d..5a8e29d08ad2 100644
--- a/include/llvm/MC/MCAsmBackend.h
+++ b/include/llvm/MC/MCAsmBackend.h
@@ -73,7 +73,7 @@ public:
/// reported via \p Ctx.
virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsPCRel) const = 0;
+ uint64_t Value, bool IsResolved) const = 0;
/// @}
diff --git a/include/llvm/MC/MCDisassembler/MCDisassembler.h b/include/llvm/MC/MCDisassembler/MCDisassembler.h
index 5e626f186986..7f09c05ccf2a 100644
--- a/include/llvm/MC/MCDisassembler/MCDisassembler.h
+++ b/include/llvm/MC/MCDisassembler/MCDisassembler.h
@@ -68,6 +68,7 @@ public:
/// an invalid instruction.
/// \param Address - The address, in the memory space of region, of the first
/// byte of the instruction.
+ /// \param Bytes - A reference to the actual bytes of the instruction.
/// \param VStream - The stream to print warnings and diagnostic messages on.
/// \param CStream - The stream to print comments and annotations on.
/// \return - MCDisassembler::Success if the instruction is valid,
diff --git a/include/llvm/MC/MCMachObjectWriter.h b/include/llvm/MC/MCMachObjectWriter.h
index 2d2480a27223..42dc90da3049 100644
--- a/include/llvm/MC/MCMachObjectWriter.h
+++ b/include/llvm/MC/MCMachObjectWriter.h
@@ -233,8 +233,7 @@ public:
void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
- MCValue Target, bool &IsPCRel,
- uint64_t &FixedValue) override;
+ MCValue Target, uint64_t &FixedValue) override;
void bindIndirectSymbols(MCAssembler &Asm);
diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h
index 86bcbb6861d7..cd90690fb186 100644
--- a/include/llvm/MC/MCObjectWriter.h
+++ b/include/llvm/MC/MCObjectWriter.h
@@ -86,7 +86,7 @@ public:
virtual void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment,
const MCFixup &Fixup, MCValue Target,
- bool &IsPCRel, uint64_t &FixedValue) = 0;
+ uint64_t &FixedValue) = 0;
/// Check whether the difference (A - B) between two symbol references is
/// fully resolved.
diff --git a/include/llvm/MC/MCSymbolWasm.h b/include/llvm/MC/MCSymbolWasm.h
index 7ea89629efda..9bae6c582faa 100644
--- a/include/llvm/MC/MCSymbolWasm.h
+++ b/include/llvm/MC/MCSymbolWasm.h
@@ -21,6 +21,8 @@ private:
std::string ModuleName;
SmallVector<wasm::ValType, 1> Returns;
SmallVector<wasm::ValType, 4> Params;
+ bool ParamsSet = false;
+ bool ReturnsSet = false;
/// An expression describing how to calculate the size of a symbol. If a
/// symbol has no size this field will be NULL.
@@ -45,15 +47,23 @@ public:
const StringRef getModuleName() const { return ModuleName; }
- const SmallVector<wasm::ValType, 1> &getReturns() const { return Returns; }
+ const SmallVector<wasm::ValType, 1> &getReturns() const {
+ assert(ReturnsSet);
+ return Returns;
+ }
void setReturns(SmallVectorImpl<wasm::ValType> &&Rets) {
+ ReturnsSet = true;
Returns = std::move(Rets);
}
- const SmallVector<wasm::ValType, 4> &getParams() const { return Params; }
+ const SmallVector<wasm::ValType, 4> &getParams() const {
+ assert(ParamsSet);
+ return Params;
+ }
void setParams(SmallVectorImpl<wasm::ValType> &&Pars) {
+ ParamsSet = true;
Params = std::move(Pars);
}
};
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 78e0b5f6ed30..89c1ba6be35f 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -698,6 +698,9 @@ struct coff_resource_dir_entry {
uint32_t getNameOffset() const {
return maskTrailingOnes<uint32_t>(31) & NameOffset;
}
+ // Even though the PE/COFF spec doesn't mention this, the high bit of a name
+ // offset is set.
+ void setNameOffset(uint32_t Offset) { NameOffset = Offset | (1 << 31); }
} Identifier;
union {
support::ulittle32_t DataEntryOffset;
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
index 5c8445f10f44..07ee4a4d6c4d 100644
--- a/include/llvm/Object/Wasm.h
+++ b/include/llvm/Object/Wasm.h
@@ -61,7 +61,7 @@ public:
void print(raw_ostream &Out) const {
Out << "Name=" << Name << ", Type=" << static_cast<int>(Type)
- << ", Flags=" << Flags;
+ << ", Flags=" << Flags << " ElemIndex=" << ElementIndex;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -69,8 +69,7 @@ public:
#endif
};
-class WasmSection {
-public:
+struct WasmSection {
WasmSection() = default;
uint32_t Type = 0; // Section type (See below)
@@ -80,6 +79,11 @@ public:
std::vector<wasm::WasmRelocation> Relocations; // Relocations for this section
};
+struct WasmSegment {
+ uint32_t SectionOffset;
+ wasm::WasmDataSegment Data;
+};
+
class WasmObjectFile : public ObjectFile {
public:
@@ -110,7 +114,7 @@ public:
return ElemSegments;
}
- const std::vector<wasm::WasmDataSegment>& dataSegments() const {
+ const std::vector<WasmSegment>& dataSegments() const {
return DataSegments;
}
@@ -210,7 +214,7 @@ private:
std::vector<wasm::WasmImport> Imports;
std::vector<wasm::WasmExport> Exports;
std::vector<wasm::WasmElemSegment> ElemSegments;
- std::vector<wasm::WasmDataSegment> DataSegments;
+ std::vector<WasmSegment> DataSegments;
std::vector<wasm::WasmFunction> Functions;
std::vector<WasmSymbol> Symbols;
ArrayRef<uint8_t> CodeSection;
diff --git a/include/llvm/ObjectYAML/WasmYAML.h b/include/llvm/ObjectYAML/WasmYAML.h
index 6bf08d340eeb..709ad8ec3b77 100644
--- a/include/llvm/ObjectYAML/WasmYAML.h
+++ b/include/llvm/ObjectYAML/WasmYAML.h
@@ -98,7 +98,8 @@ struct Relocation {
};
struct DataSegment {
- uint32_t Index;
+ uint32_t MemoryIndex;
+ uint32_t SectionOffset;
wasm::WasmInitExpr Offset;
yaml::BinaryRef Content;
};
diff --git a/include/llvm/Option/OptTable.h b/include/llvm/Option/OptTable.h
index 3e7b019a0d4e..a35e182f00e5 100644
--- a/include/llvm/Option/OptTable.h
+++ b/include/llvm/Option/OptTable.h
@@ -140,7 +140,8 @@ public:
// to start with.
///
/// \return The vector of flags which start with Cur.
- std::vector<std::string> findByPrefix(StringRef Cur) const;
+ std::vector<std::string> findByPrefix(StringRef Cur,
+ unsigned short DisableFlags) const;
/// \brief Parse a single argument; returning the new argument and
/// updating Index.
diff --git a/include/llvm/Passes/PassBuilder.h b/include/llvm/Passes/PassBuilder.h
index ff1958397331..33433f6b4a10 100644
--- a/include/llvm/Passes/PassBuilder.h
+++ b/include/llvm/Passes/PassBuilder.h
@@ -46,6 +46,19 @@ class PassBuilder {
Optional<PGOOptions> PGOOpt;
public:
+ /// \brief A struct to capture parsed pass pipeline names.
+ ///
+ /// A pipeline is defined as a series of names, each of which may in itself
+ /// recursively contain a nested pipeline. A name is either the name of a pass
+ /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
+ /// name is the name of a pass, the InnerPipeline is empty, since passes
+ /// cannot contain inner pipelines. See parsePassPipeline() for a more
+ /// detailed description of the textual pipeline format.
+ struct PipelineElement {
+ StringRef Name;
+ std::vector<PipelineElement> InnerPipeline;
+ };
+
/// \brief LLVM-provided high-level optimization levels.
///
/// This enumerates the LLVM-provided high-level optimization levels. Each
@@ -188,9 +201,14 @@ public:
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
+ ///
+  /// \p PrepareForThinLTO indicates whether this is invoked in the
+  /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
+  /// keep the profile accurate in the backend profile annotation phase.
FunctionPassManager
buildFunctionSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging = false);
+ bool DebugLogging = false,
+ bool PrepareForThinLTO = false);
/// Construct the core LLVM module canonicalization and simplification
/// pipeline.
@@ -205,9 +223,14 @@ public:
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
+ ///
+  /// \p PrepareForThinLTO indicates whether this is invoked in the
+  /// PrepareForThinLTO phase. Special handling is needed for sample PGO to
+  /// keep the profile accurate in the backend profile annotation phase.
ModulePassManager
buildModuleSimplificationPipeline(OptimizationLevel Level,
- bool DebugLogging = false);
+ bool DebugLogging = false,
+ bool PrepareForThinLTO = false);
/// Construct the core LLVM module optimization pipeline.
///
@@ -302,7 +325,8 @@ public:
/// registered.
AAManager buildDefaultAAPipeline();
- /// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
+ /// \brief Parse a textual pass pipeline description into a \c
+ /// ModulePassManager.
///
/// The format of the textual pass pipeline description looks something like:
///
@@ -312,8 +336,8 @@ public:
/// are comma separated. As a special shortcut, if the very first pass is not
/// a module pass (as a module pass manager is), this will automatically form
/// the shortest stack of pass managers that allow inserting that first pass.
- /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop passes
- /// 'lpassN', all of these are valid:
+ /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
+ /// passes 'lpassN', all of these are valid:
///
/// fpass1,fpass2,fpass3
/// cgpass1,cgpass2,cgpass3
@@ -326,13 +350,28 @@ public:
/// module(function(loop(lpass1,lpass2,lpass3)))
///
/// This shortcut is especially useful for debugging and testing small pass
- /// combinations. Note that these shortcuts don't introduce any other magic. If
- /// the sequence of passes aren't all the exact same kind of pass, it will be
- /// an error. You cannot mix different levels implicitly, you must explicitly
- /// form a pass manager in which to nest passes.
+ /// combinations. Note that these shortcuts don't introduce any other magic.
+ /// If the sequence of passes aren't all the exact same kind of pass, it will
+ /// be an error. You cannot mix different levels implicitly, you must
+ /// explicitly form a pass manager in which to nest passes.
bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
bool VerifyEachPass = true, bool DebugLogging = false);
+ /// {{@ Parse a textual pass pipeline description into a specific PassManager
+ ///
+ /// Automatic deduction of an appropriate pass manager stack is not supported.
+  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
+  /// the valid pipeline text is:
+ ///
+ /// function(lpass)
+ bool parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ bool parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ bool parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+ /// @}}
+
/// Parse a textual alias analysis pipeline into the provided AA manager.
///
/// The format of the textual AA pipeline is a comma separated list of AA
@@ -350,13 +389,139 @@ public:
/// returns false.
bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
-private:
- /// A struct to capture parsed pass pipeline names.
- struct PipelineElement {
- StringRef Name;
- std::vector<PipelineElement> InnerPipeline;
- };
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding passes that perform peephole
+ /// optimizations similar to the instruction combiner. These passes will be
+ /// inserted after each instance of the instruction combiner pass.
+ void registerPeepholeEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ PeepholeEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding late loop canonicalization and
+ /// simplification passes. This is the last point in the loop optimization
+ /// pipeline before loop deletion. Each pass added
+ /// here must be an instance of LoopPass.
+ /// This is the place to add passes that can remove loops, such as target-
+ /// specific loop idiom recognition.
+ void registerLateLoopOptimizationsEPCallback(
+ const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+ LateLoopOptimizationsEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding loop passes to the end of the loop
+ /// optimizer.
+ void registerLoopOptimizerEndEPCallback(
+ const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
+ LoopOptimizerEndEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding optimization passes after most of the
+ /// main optimizations, but before the last cleanup-ish optimizations.
+ void registerScalarOptimizerLateEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ ScalarOptimizerLateEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding CallGraphSCC passes at the end of the
+ /// main CallGraphSCC passes and before any function simplification passes run
+ /// by CGPassManager.
+ void registerCGSCCOptimizerLateEPCallback(
+ const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
+ CGSCCOptimizerLateEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for a default optimizer pipeline extension
+ /// point
+ ///
+ /// This extension point allows adding optimization passes before the
+ /// vectorizer and other highly target specific optimization passes are
+ /// executed.
+ void registerVectorizerStartEPCallback(
+ const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
+ VectorizerStartEPCallbacks.push_back(C);
+ }
+
+ /// \brief Register a callback for parsing an AliasAnalysis Name to populate
+ /// the given AAManager \p AA
+ void registerParseAACallback(
+ const std::function<bool(StringRef Name, AAManager &AA)> &C) {
+ AAParsingCallbacks.push_back(C);
+ }
+
+ /// {{@ Register callbacks for analysis registration with this PassBuilder
+ /// instance.
+ /// Callees register their analyses with the given AnalysisManager objects.
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(CGSCCAnalysisManager &)> &C) {
+ CGSCCAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(FunctionAnalysisManager &)> &C) {
+ FunctionAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(LoopAnalysisManager &)> &C) {
+ LoopAnalysisRegistrationCallbacks.push_back(C);
+ }
+ void registerAnalysisRegistrationCallback(
+ const std::function<void(ModuleAnalysisManager &)> &C) {
+ ModuleAnalysisRegistrationCallbacks.push_back(C);
+ }
+ /// @}}
+
+ /// {{@ Register pipeline parsing callbacks with this pass builder instance.
+ /// Using these callbacks, callers can parse both a single pass name, as well
+ /// as entire sub-pipelines, and populate the PassManager instance
+ /// accordingly.
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, CGSCCPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ CGSCCPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, FunctionPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ FunctionPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, LoopPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ LoopPipelineParsingCallbacks.push_back(C);
+ }
+ void registerPipelineParsingCallback(
+ const std::function<bool(StringRef Name, ModulePassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ ModulePipelineParsingCallbacks.push_back(C);
+ }
+ /// @}}
+
+ /// \brief Register a callback for a top-level pipeline entry.
+ ///
+ /// If the PassManager type is not given at the top level of the pipeline
+ /// text, this Callback should be used to determine the appropriate stack of
+ /// PassManagers and populate the passed ModulePassManager.
+ void registerParseTopLevelPipelineCallback(
+ const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+ bool VerifyEachPass, bool DebugLogging)> &C) {
+ TopLevelPipelineParsingCallbacks.push_back(C);
+ }
+private:
static Optional<std::vector<PipelineElement>>
parsePipelineText(StringRef Text);
@@ -382,7 +547,106 @@ private:
bool parseModulePassPipeline(ModulePassManager &MPM,
ArrayRef<PipelineElement> Pipeline,
bool VerifyEachPass, bool DebugLogging);
+
+ void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
+ OptimizationLevel Level, bool RunProfileGen,
+ std::string ProfileGenFile,
+ std::string ProfileUseFile);
+
+ void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
+
+ // Extension Point callbacks
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ PeepholeEPCallbacks;
+ SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+ LateLoopOptimizationsEPCallbacks;
+ SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
+ LoopOptimizerEndEPCallbacks;
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ ScalarOptimizerLateEPCallbacks;
+ SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
+ CGSCCOptimizerLateEPCallbacks;
+ SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
+ VectorizerStartEPCallbacks;
+ // Module callbacks
+ SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
+ ModuleAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, ModulePassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ ModulePipelineParsingCallbacks;
+ SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+ bool VerifyEachPass, bool DebugLogging)>,
+ 2>
+ TopLevelPipelineParsingCallbacks;
+ // CGSCC callbacks
+ SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
+ CGSCCAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ CGSCCPipelineParsingCallbacks;
+ // Function callbacks
+ SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
+ FunctionAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, FunctionPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ FunctionPipelineParsingCallbacks;
+ // Loop callbacks
+ SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
+ LoopAnalysisRegistrationCallbacks;
+ SmallVector<std::function<bool(StringRef, LoopPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ LoopPipelineParsingCallbacks;
+ // AA callbacks
+ SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
+ AAParsingCallbacks;
};
+
+/// This utility template takes care of adding require<> and invalidate<>
+/// passes for an analysis to a given \c PassManager. It is intended to be used
+/// during parsing of a pass pipeline when parsing a single PipelineName.
+/// When registering a new function analysis FancyAnalysis with the pass
+/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
+/// like this:
+///
+/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
+/// ArrayRef<PipelineElement> P) {
+/// if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
+/// FPM))
+/// return true;
+/// return false;
+/// }
+template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
+ typename... ExtraArgTs>
+bool parseAnalysisUtilityPasses(
+ StringRef AnalysisName, StringRef PipelineName,
+ PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
+ if (!PipelineName.endswith(">"))
+ return false;
+ // See if this is an invalidate<> pass name
+ if (PipelineName.startswith("invalidate<")) {
+ PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
+ if (PipelineName != AnalysisName)
+ return false;
+ PM.addPass(InvalidateAnalysisPass<AnalysisT>());
+ return true;
+ }
+
+ // See if this is a require<> pass name
+ if (PipelineName.startswith("require<")) {
+ PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
+ if (PipelineName != AnalysisName)
+ return false;
+ PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
+ ExtraArgTs...>());
+ return true;
+ }
+
+ return false;
+}
}
#endif
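As a quick illustration of how these hooks compose, here is a minimal sketch of a plugin-style registration. FancyAnalysis and FancyPass are hypothetical, and it assumes PassBuilder exposes a registerPipelineParsingCallback method matching the FunctionPipelineParsingCallbacks signature stored above.

// Sketch only: FancyAnalysis/FancyPass are hypothetical; the registration
// hook is assumed to take the function-pipeline callback signature above.
#include "llvm/Passes/PassBuilder.h"
using namespace llvm;

void registerFancyCallbacks(PassBuilder &PB) {
  PB.registerPipelineParsingCallback(
      [](StringRef Name, FunctionPassManager &FPM,
         ArrayRef<PassBuilder::PipelineElement>) {
        // Handles "require<fancy-analysis>" and "invalidate<fancy-analysis>".
        if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
                                                      FPM))
          return true;
        if (Name == "fancy-pass") {
          FPM.addPass(FancyPass());
          return true;
        }
        return false;
      });
}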
diff --git a/include/llvm/ProfileData/InstrProf.h b/include/llvm/ProfileData/InstrProf.h
index a6b2850ccd22..772187f70153 100644
--- a/include/llvm/ProfileData/InstrProf.h
+++ b/include/llvm/ProfileData/InstrProf.h
@@ -249,9 +249,8 @@ void annotateValueSite(Module &M, Instruction &Inst,
/// Same as the above interface but using an ArrayRef, as well as \p Sum.
void annotateValueSite(Module &M, Instruction &Inst,
- ArrayRef<InstrProfValueData> VDs,
- uint64_t Sum, InstrProfValueKind ValueKind,
- uint32_t MaxMDCount);
+ ArrayRef<InstrProfValueData> VDs, uint64_t Sum,
+ InstrProfValueKind ValueKind, uint32_t MaxMDCount);
/// Extract the value profile data from \p Inst which is annotated with
/// value profile meta data. Return false if there is no value data annotated,
@@ -582,34 +581,27 @@ struct InstrProfValueSiteRecord {
/// Merge data from another InstrProfValueSiteRecord
/// Optionally scale merged counts by \p Weight.
- void merge(SoftInstrProfErrors &SIPE, InstrProfValueSiteRecord &Input,
- uint64_t Weight = 1);
+ void merge(InstrProfValueSiteRecord &Input, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
/// Scale up value profile data counts.
- void scale(SoftInstrProfErrors &SIPE, uint64_t Weight);
+ void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
};
/// Profiling information for a single function.
struct InstrProfRecord {
- StringRef Name;
- uint64_t Hash;
std::vector<uint64_t> Counts;
- SoftInstrProfErrors SIPE;
InstrProfRecord() = default;
- InstrProfRecord(StringRef Name, uint64_t Hash, std::vector<uint64_t> Counts)
- : Name(Name), Hash(Hash), Counts(std::move(Counts)) {}
+ InstrProfRecord(std::vector<uint64_t> Counts) : Counts(std::move(Counts)) {}
InstrProfRecord(InstrProfRecord &&) = default;
InstrProfRecord(const InstrProfRecord &RHS)
- : Name(RHS.Name), Hash(RHS.Hash), Counts(RHS.Counts), SIPE(RHS.SIPE),
+ : Counts(RHS.Counts),
ValueData(RHS.ValueData
? llvm::make_unique<ValueProfData>(*RHS.ValueData)
: nullptr) {}
InstrProfRecord &operator=(InstrProfRecord &&) = default;
InstrProfRecord &operator=(const InstrProfRecord &RHS) {
- Name = RHS.Name;
- Hash = RHS.Hash;
Counts = RHS.Counts;
- SIPE = RHS.SIPE;
if (!RHS.ValueData) {
ValueData = nullptr;
return *this;
@@ -626,7 +618,6 @@ struct InstrProfRecord {
/// Return the number of value profile kinds with non-zero number
/// of profile sites.
inline uint32_t getNumValueKinds() const;
-
/// Return the number of instrumented sites for ValueKind.
inline uint32_t getNumValueSites(uint32_t ValueKind) const;
@@ -661,11 +652,12 @@ struct InstrProfRecord {
/// Merge the counts in \p Other into this one.
/// Optionally scale merged counts by \p Weight.
- void merge(InstrProfRecord &Other, uint64_t Weight = 1);
+ void merge(InstrProfRecord &Other, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
/// Scale up profile counts (including value profile data) by
/// \p Weight.
- void scale(uint64_t Weight);
+ void scale(uint64_t Weight, function_ref<void(instrprof_error)> Warn);
/// Sort value profile data (per site) by count.
void sortValueData() {
@@ -683,9 +675,6 @@ struct InstrProfRecord {
/// Clear value data entries
void clearValueData() { ValueData = nullptr; }
- /// Get the error contained within the record's soft error counter.
- Error takeError() { return SIPE.takeError(); }
-
private:
struct ValueProfData {
std::vector<InstrProfValueSiteRecord> IndirectCallSites;
@@ -737,11 +726,23 @@ private:
// Merge Value Profile data from Src record to this record for ValueKind.
// Scale merged value counts by \p Weight.
- void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
- uint64_t Weight);
+ void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
+ uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
// Scale up value profile data count.
- void scaleValueProfData(uint32_t ValueKind, uint64_t Weight);
+ void scaleValueProfData(uint32_t ValueKind, uint64_t Weight,
+ function_ref<void(instrprof_error)> Warn);
+};
+
+struct NamedInstrProfRecord : InstrProfRecord {
+ StringRef Name;
+ uint64_t Hash;
+
+ NamedInstrProfRecord() = default;
+ NamedInstrProfRecord(StringRef Name, uint64_t Hash,
+ std::vector<uint64_t> Counts)
+ : InstrProfRecord(std::move(Counts)), Name(Name), Hash(Hash) {}
};
uint32_t InstrProfRecord::getNumValueKinds() const {
@@ -753,11 +754,8 @@ uint32_t InstrProfRecord::getNumValueKinds() const {
uint32_t InstrProfRecord::getNumValueData(uint32_t ValueKind) const {
uint32_t N = 0;
- const std::vector<InstrProfValueSiteRecord> &SiteRecords =
- getValueSitesForKind(ValueKind);
- for (auto &SR : SiteRecords) {
+ for (auto &SR : getValueSitesForKind(ValueKind))
N += SR.ValueData.size();
- }
return N;
}
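A minimal sketch of the new merge interface, with the warning callback replacing the removed SoftInstrProfErrors plumbing; the counter values are made up.

// Sketch: merging two records with the function_ref-based warning callback
// instead of querying Record.takeError() afterwards.
#include "llvm/ProfileData/InstrProf.h"
using namespace llvm;

void mergeCounts() {
  InstrProfRecord Dst(std::vector<uint64_t>{1, 2, 3});
  InstrProfRecord Src(std::vector<uint64_t>{4, 5, 6});
  Dst.merge(Src, /*Weight=*/1, [](instrprof_error E) {
    // Soft errors (e.g. counter overflow) are reported here.
  });
}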
diff --git a/include/llvm/ProfileData/InstrProfReader.h b/include/llvm/ProfileData/InstrProfReader.h
index 8163ca159209..424360e0f765 100644
--- a/include/llvm/ProfileData/InstrProfReader.h
+++ b/include/llvm/ProfileData/InstrProfReader.h
@@ -40,9 +40,9 @@ class InstrProfReader;
/// A file format agnostic iterator over profiling data.
class InstrProfIterator : public std::iterator<std::input_iterator_tag,
- InstrProfRecord> {
+ NamedInstrProfRecord> {
InstrProfReader *Reader = nullptr;
- InstrProfRecord Record;
+ value_type Record;
void Increment();
@@ -53,12 +53,12 @@ public:
InstrProfIterator &operator++() { Increment(); return *this; }
bool operator==(const InstrProfIterator &RHS) { return Reader == RHS.Reader; }
bool operator!=(const InstrProfIterator &RHS) { return Reader != RHS.Reader; }
- InstrProfRecord &operator*() { return Record; }
- InstrProfRecord *operator->() { return &Record; }
+ value_type &operator*() { return Record; }
+ value_type *operator->() { return &Record; }
};
/// Base class and interface for reading profiling data of any known instrprof
-/// format. Provides an iterator over InstrProfRecords.
+/// format. Provides an iterator over NamedInstrProfRecords.
class InstrProfReader {
instrprof_error LastError = instrprof_error::success;
@@ -70,7 +70,7 @@ public:
virtual Error readHeader() = 0;
/// Read a single record.
- virtual Error readNextRecord(InstrProfRecord &Record) = 0;
+ virtual Error readNextRecord(NamedInstrProfRecord &Record) = 0;
/// Iterator over profile data.
InstrProfIterator begin() { return InstrProfIterator(this); }
@@ -161,7 +161,7 @@ public:
Error readHeader() override;
/// Read a single record.
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
InstrProfSymtab &getSymtab() override {
assert(Symtab.get());
@@ -209,7 +209,7 @@ public:
static bool hasFormat(const MemoryBuffer &DataBuffer);
Error readHeader() override;
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
bool isIRLevelProfile() const override {
return (Version & VARIANT_MASK_IR_PROF) != 0;
@@ -243,8 +243,8 @@ private:
return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
}
- Error readName(InstrProfRecord &Record);
- Error readFuncHash(InstrProfRecord &Record);
+ Error readName(NamedInstrProfRecord &Record);
+ Error readFuncHash(NamedInstrProfRecord &Record);
Error readRawCounts(InstrProfRecord &Record);
Error readValueProfilingData(InstrProfRecord &Record);
bool atEnd() const { return Data == DataEnd; }
@@ -281,7 +281,7 @@ enum class HashT : uint32_t;
/// Trait for lookups into the on-disk hash table for the binary instrprof
/// format.
class InstrProfLookupTrait {
- std::vector<InstrProfRecord> DataBuffer;
+ std::vector<NamedInstrProfRecord> DataBuffer;
IndexedInstrProf::HashT HashType;
unsigned FormatVersion;
// Endianness of the input value profile data.
@@ -293,7 +293,7 @@ public:
InstrProfLookupTrait(IndexedInstrProf::HashT HashType, unsigned FormatVersion)
: HashType(HashType), FormatVersion(FormatVersion) {}
- using data_type = ArrayRef<InstrProfRecord>;
+ using data_type = ArrayRef<NamedInstrProfRecord>;
using internal_key_type = StringRef;
using external_key_type = StringRef;
@@ -334,11 +334,11 @@ struct InstrProfReaderIndexBase {
// Read all the profile records with the same key pointed to the current
// iterator.
- virtual Error getRecords(ArrayRef<InstrProfRecord> &Data) = 0;
+ virtual Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) = 0;
// Read all the profile records with the key equal to FuncName
virtual Error getRecords(StringRef FuncName,
- ArrayRef<InstrProfRecord> &Data) = 0;
+ ArrayRef<NamedInstrProfRecord> &Data) = 0;
virtual void advanceToNextKey() = 0;
virtual bool atEnd() const = 0;
virtual void setValueProfDataEndianness(support::endianness Endianness) = 0;
@@ -364,9 +364,9 @@ public:
IndexedInstrProf::HashT HashType, uint64_t Version);
~InstrProfReaderIndex() override = default;
- Error getRecords(ArrayRef<InstrProfRecord> &Data) override;
+ Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) override;
Error getRecords(StringRef FuncName,
- ArrayRef<InstrProfRecord> &Data) override;
+ ArrayRef<NamedInstrProfRecord> &Data) override;
void advanceToNextKey() override { RecordIterator++; }
bool atEnd() const override {
@@ -419,10 +419,9 @@ public:
/// Read the file header.
Error readHeader() override;
/// Read a single record.
- Error readNextRecord(InstrProfRecord &Record) override;
+ Error readNextRecord(NamedInstrProfRecord &Record) override;
- /// Return the pointer to InstrProfRecord associated with FuncName
- /// and FuncHash
+ /// Return the NamedInstrProfRecord associated with FuncName and FuncHash
Expected<InstrProfRecord> getInstrProfRecord(StringRef FuncName,
uint64_t FuncHash);
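With the reader now yielding NamedInstrProfRecord, the name and hash are read straight off each record. A sketch, assuming InstrProfReader::create and a made-up profile path:

// Sketch: iterating a profile; each element is a NamedInstrProfRecord
// carrying Name and Hash alongside the counters.
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void dumpProfile() {
  auto ReaderOrErr = InstrProfReader::create("default.profdata");
  if (!ReaderOrErr) {
    errs() << toString(ReaderOrErr.takeError()) << "\n";
    return;
  }
  for (NamedInstrProfRecord &Record : **ReaderOrErr)
    outs() << Record.Name << " (hash " << Record.Hash << "): "
           << Record.Counts.size() << " counters\n";
}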
diff --git a/include/llvm/ProfileData/InstrProfWriter.h b/include/llvm/ProfileData/InstrProfWriter.h
index fff10af30295..8107ab386fe2 100644
--- a/include/llvm/ProfileData/InstrProfWriter.h
+++ b/include/llvm/ProfileData/InstrProfWriter.h
@@ -33,7 +33,7 @@ class raw_fd_ostream;
class InstrProfWriter {
public:
- using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord, 1>;
+ using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord>;
enum ProfKind { PF_Unknown = 0, PF_FE, PF_IRLevel };
private:
@@ -50,10 +50,15 @@ public:
/// Add function counts for the given function. If there are already counts
/// for this function and the hash and number of counts match, each counter is
/// summed. Optionally scale counts by \p Weight.
- Error addRecord(InstrProfRecord &&I, uint64_t Weight = 1);
+ void addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
+ function_ref<void(Error)> Warn);
+ void addRecord(NamedInstrProfRecord &&I, function_ref<void(Error)> Warn) {
+ addRecord(std::move(I), 1, Warn);
+ }
/// Merge existing function counts from the given writer.
- Error mergeRecordsFromWriter(InstrProfWriter &&IPW);
+ void mergeRecordsFromWriter(InstrProfWriter &&IPW,
+ function_ref<void(Error)> Warn);
/// Write the profile to \c OS
void write(raw_fd_ostream &OS);
@@ -62,7 +67,8 @@ public:
Error writeText(raw_fd_ostream &OS);
/// Write \c Record in text format to \c OS
- static void writeRecordInText(const InstrProfRecord &Record,
+ static void writeRecordInText(StringRef Name, uint64_t Hash,
+ const InstrProfRecord &Counters,
InstrProfSymtab &Symtab, raw_fd_ostream &OS);
/// Write the profile, returning the raw data. For testing.
@@ -85,6 +91,8 @@ public:
void setOutputSparse(bool Sparse);
private:
+ void addRecord(StringRef Name, uint64_t Hash, InstrProfRecord &&I,
+ uint64_t Weight, function_ref<void(Error)> Warn);
bool shouldEncodeData(const ProfilingData &PD);
void writeImpl(ProfOStream &OS);
};
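On the writer side, addRecord now takes the record plus a warning callback instead of returning an Error; a minimal sketch:

// Sketch: adding a record to InstrProfWriter with the new warning callback.
#include "llvm/ProfileData/InstrProfWriter.h"
#include "llvm/Support/Error.h"
using namespace llvm;

void addOne(InstrProfWriter &Writer) {
  NamedInstrProfRecord R("foo", /*Hash=*/0x1234, {1, 2, 3});
  Writer.addRecord(std::move(R), [](Error E) {
    // Warnings (e.g. count mismatches) arrive here rather than as a
    // returned Error.
    consumeError(std::move(E));
  });
}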
diff --git a/include/llvm/ProfileData/ProfileCommon.h b/include/llvm/ProfileData/ProfileCommon.h
index 987e3160ccae..51b065bcdb70 100644
--- a/include/llvm/ProfileData/ProfileCommon.h
+++ b/include/llvm/ProfileData/ProfileCommon.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ProfileSummary.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
@@ -27,8 +28,6 @@
namespace llvm {
-struct InstrProfRecord;
-
namespace sampleprof {
class FunctionSamples;
diff --git a/include/llvm/Support/BlockFrequency.h b/include/llvm/Support/BlockFrequency.h
index 1b45cc52973f..2e75cbdd29c1 100644
--- a/include/llvm/Support/BlockFrequency.h
+++ b/include/llvm/Support/BlockFrequency.h
@@ -71,6 +71,10 @@ public:
bool operator>=(BlockFrequency RHS) const {
return Frequency >= RHS.Frequency;
}
+
+ bool operator==(BlockFrequency RHS) const {
+ return Frequency == RHS.Frequency;
+ }
};
}
diff --git a/include/llvm/Support/Compiler.h b/include/llvm/Support/Compiler.h
index be9e46540016..b19e37235df5 100644
--- a/include/llvm/Support/Compiler.h
+++ b/include/llvm/Support/Compiler.h
@@ -493,4 +493,14 @@ void AnnotateIgnoreWritesEnd(const char *file, int line);
#define LLVM_THREAD_LOCAL
#endif
+/// \macro LLVM_ENABLE_EXCEPTIONS
+/// \brief Whether LLVM is built with exception support.
+#if __has_feature(cxx_exceptions)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(__GNUC__) && defined(__EXCEPTIONS)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#elif defined(_MSC_VER) && defined(_CPPUNWIND)
+#define LLVM_ENABLE_EXCEPTIONS 1
+#endif
+
#endif
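Downstream code can key off the new macro to choose between throwing and aborting; a minimal sketch:

// Sketch: using LLVM_ENABLE_EXCEPTIONS to pick a failure strategy.
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <new>

void failAllocation() {
#if LLVM_ENABLE_EXCEPTIONS
  throw std::bad_alloc();
#else
  llvm::report_fatal_error("allocation failed");
#endif
}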
diff --git a/include/llvm/Support/DynamicLibrary.h b/include/llvm/Support/DynamicLibrary.h
index a8874a10d461..469d5dfad062 100644
--- a/include/llvm/Support/DynamicLibrary.h
+++ b/include/llvm/Support/DynamicLibrary.h
@@ -88,6 +88,22 @@ namespace sys {
return !getPermanentLibrary(Filename, ErrMsg).isValid();
}
+ enum SearchOrdering {
+ /// SO_Linker - Search as a call to dlsym(dlopen(NULL)) would when
+ /// DynamicLibrary::getPermanentLibrary(NULL) has been called, or
+ /// search the list of explicitly loaded symbols if it has not.
+ SO_Linker,
+ /// SO_LoadedFirst - Search all loaded libraries, then as SO_Linker would.
+ SO_LoadedFirst,
+ /// SO_LoadedLast - Search as SO_Linker would, then loaded libraries.
+ /// Only useful to search if libraries with RTLD_LOCAL have been added.
+ SO_LoadedLast,
+ /// SO_LoadOrder - Or this in to search libraries in the order loaded.
+ /// The default behaviour is to search loaded libraries in reverse.
+ SO_LoadOrder = 4
+ };
+ static SearchOrdering SearchOrder; // = SO_Linker
+
/// This function will search through all previously loaded dynamic
/// libraries for the symbol \p symbolName. If it is found, the address of
/// that symbol is returned. If not, null is returned. Note that this will
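A sketch of how a host might tweak the new ordering before resolving a symbol; the symbol name is made up:

// Sketch: prefer explicitly loaded libraries, walked in load order,
// over the process symbol table.
#include "llvm/Support/DynamicLibrary.h"
using namespace llvm::sys;

void *lookupHostSymbol() {
  DynamicLibrary::SearchOrder = static_cast<DynamicLibrary::SearchOrdering>(
      DynamicLibrary::SO_LoadedFirst | DynamicLibrary::SO_LoadOrder);
  return DynamicLibrary::SearchForAddressOfSymbol("my_plugin_entry");
}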
diff --git a/include/llvm/Support/ErrorHandling.h b/include/llvm/Support/ErrorHandling.h
index 7c1edd801571..b45f6348390e 100644
--- a/include/llvm/Support/ErrorHandling.h
+++ b/include/llvm/Support/ErrorHandling.h
@@ -78,12 +78,48 @@ LLVM_ATTRIBUTE_NORETURN void report_fatal_error(StringRef reason,
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const Twine &reason,
bool gen_crash_diag = true);
- /// This function calls abort(), and prints the optional message to stderr.
- /// Use the llvm_unreachable macro (that adds location info), instead of
- /// calling this function directly.
- LLVM_ATTRIBUTE_NORETURN void
- llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr,
- unsigned line=0);
+/// Installs a new bad alloc error handler that should be used whenever a
+/// bad alloc error, e.g. failing malloc/calloc, is encountered by LLVM.
+///
+/// The user can install a bad alloc handler in order to define the behavior
+/// in case of failing allocations, e.g. throwing an exception. Note that this
+/// handler must not trigger any additional allocations itself.
+///
+/// If no error handler is installed, the default is to print the error message
+/// to stderr and call exit(1). If an error handler is installed, then it is
+/// the handler's responsibility to log the message; it will no longer be
+/// printed to stderr. If the error handler returns, then exit(1) will be
+/// called.
+///
+/// \param user_data - An argument which will be passed to the installed error
+/// handler.
+void install_bad_alloc_error_handler(fatal_error_handler_t handler,
+ void *user_data = nullptr);
+
+/// Restores default bad alloc error handling behavior.
+void remove_bad_alloc_error_handler();
+
+/// Reports a bad alloc error, calling any user-defined bad alloc
+/// error handler. In contrast to the generic 'report_fatal_error'
+/// functions, this function is expected to return, e.g. when the
+/// user-defined error handler throws an exception.
+///
+/// Note: When throwing an exception in the bad alloc handler, make sure that
+/// the following unwind succeeds, e.g. do not trigger additional allocations
+/// in the unwind chain.
+///
+/// If no error handler is installed (the default), a bad_alloc exception
+/// is thrown if LLVM is compiled with exception support; otherwise an
+/// assertion failure is triggered.
+void report_bad_alloc_error(const char *Reason, bool GenCrashDiag = true);
+
+/// This function calls abort(), and prints the optional message to stderr.
+/// Use the llvm_unreachable macro (that adds location info), instead of
+/// calling this function directly.
+LLVM_ATTRIBUTE_NORETURN void
+llvm_unreachable_internal(const char *msg = nullptr, const char *file = nullptr,
+ unsigned line = 0);
}
/// Marks that the current location is not supposed to be reachable.
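A minimal sketch of a bad-alloc handler that throws instead of exiting. It assumes the existing fatal_error_handler_t signature (void *, const std::string &, bool) and deliberately avoids allocating:

// Sketch: route bad-alloc errors into an exception instead of exit(1).
#include "llvm/Support/ErrorHandling.h"
#include <new>
#include <string>

static void throwingBadAllocHandler(void *UserData, const std::string &Reason,
                                     bool GenCrashDiag) {
  // Must not allocate here; throwing std::bad_alloc is allocation-free.
  throw std::bad_alloc();
}

void setupHandlers() {
  llvm::install_bad_alloc_error_handler(throwingBadAllocHandler);
}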
diff --git a/include/llvm/Support/GenericDomTreeConstruction.h b/include/llvm/Support/GenericDomTreeConstruction.h
index 9edf03aa3621..a0fec668e05c 100644
--- a/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/include/llvm/Support/GenericDomTreeConstruction.h
@@ -32,6 +32,20 @@
namespace llvm {
namespace DomTreeBuilder {
+template <typename NodePtr, bool Inverse>
+struct ChildrenGetter {
+ static auto Get(NodePtr N) -> decltype(reverse(children<NodePtr>(N))) {
+ return reverse(children<NodePtr>(N));
+ }
+};
+
+template <typename NodePtr>
+struct ChildrenGetter<NodePtr, true> {
+ static auto Get(NodePtr N) -> decltype(inverse_children<NodePtr>(N)) {
+ return inverse_children<NodePtr>(N);
+ }
+};
+
// Information record used by Semi-NCA during tree construction.
template <typename NodeT>
struct SemiNCAInfo {
@@ -45,6 +59,7 @@ struct SemiNCAInfo {
unsigned Semi = 0;
NodePtr Label = nullptr;
NodePtr IDom = nullptr;
+ SmallVector<NodePtr, 2> ReverseChildren;
};
std::vector<NodePtr> NumToNode;
@@ -79,66 +94,49 @@ struct SemiNCAInfo {
.get();
}
- // External storage for depth first iterator that reuses the info lookup map
- // SemiNCAInfo already has. We don't have a set, but a map instead, so we are
- // converting the one argument insert calls.
- struct df_iterator_dom_storage {
- public:
- using BaseSet = decltype(NodeToInfo);
- df_iterator_dom_storage(BaseSet &Storage) : Storage(Storage) {}
-
- using iterator = typename BaseSet::iterator;
- std::pair<iterator, bool> insert(NodePtr N) {
- return Storage.insert({N, InfoRec()});
- }
- void completed(NodePtr) {}
-
- private:
- BaseSet &Storage;
- };
-
- df_iterator_dom_storage getStorage() { return {NodeToInfo}; }
+ static bool AlwaysDescend(NodePtr, NodePtr) { return true; }
- unsigned runReverseDFS(NodePtr V, unsigned N) {
- auto DFStorage = getStorage();
+ // Custom DFS implementation which can skip nodes based on a provided
+ // predicate. It also collects ReverseChildren so that we don't have to spend
+ // time getting predecessors in SemiNCA.
+ template <bool Inverse, typename DescendCondition>
+ unsigned runDFS(NodePtr V, unsigned LastNum, DescendCondition Condition,
+ unsigned AttachToNum) {
+ assert(V);
+ SmallVector<NodePtr, 64> WorkList = {V};
+ if (NodeToInfo.count(V) != 0) NodeToInfo[V].Parent = AttachToNum;
- bool IsChildOfArtificialExit = (N != 0);
- for (auto I = idf_ext_begin(V, DFStorage), E = idf_ext_end(V, DFStorage);
- I != E; ++I) {
- NodePtr BB = *I;
+ while (!WorkList.empty()) {
+ const NodePtr BB = WorkList.pop_back_val();
auto &BBInfo = NodeToInfo[BB];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
+
+ // Visited nodes always have positive DFS numbers.
+ if (BBInfo.DFSNum != 0) continue;
+ BBInfo.DFSNum = BBInfo.Semi = ++LastNum;
BBInfo.Label = BB;
- // Set the parent to the top of the visited stack. The stack includes us,
- // and is 1 based, so we subtract to account for both of these.
- if (I.getPathLength() > 1)
- BBInfo.Parent = NodeToInfo[I.getPath(I.getPathLength() - 2)].DFSNum;
- NumToNode.push_back(BB); // NumToNode[n] = V;
+ NumToNode.push_back(BB);
+
+ for (const NodePtr Succ : ChildrenGetter<NodePtr, Inverse>::Get(BB)) {
+ const auto SIT = NodeToInfo.find(Succ);
+ // Don't visit nodes more than once but remember to collect
+ // ReverseChildren.
+ if (SIT != NodeToInfo.end() && SIT->second.DFSNum != 0) {
+ if (Succ != BB) SIT->second.ReverseChildren.push_back(BB);
+ continue;
+ }
- if (IsChildOfArtificialExit)
- BBInfo.Parent = 1;
+ if (!Condition(BB, Succ)) continue;
- IsChildOfArtificialExit = false;
+ // It's fine to add Succ to the map, because we know that it will be
+ // visited later.
+ auto &SuccInfo = NodeToInfo[Succ];
+ WorkList.push_back(Succ);
+ SuccInfo.Parent = LastNum;
+ SuccInfo.ReverseChildren.push_back(BB);
+ }
}
- return N;
- }
-
- unsigned runForwardDFS(NodePtr V, unsigned N) {
- auto DFStorage = getStorage();
- for (auto I = df_ext_begin(V, DFStorage), E = df_ext_end(V, DFStorage);
- I != E; ++I) {
- NodePtr BB = *I;
- auto &BBInfo = NodeToInfo[BB];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
- BBInfo.Label = BB;
- // Set the parent to the top of the visited stack. The stack includes us,
- // and is 1 based, so we subtract to account for both of these.
- if (I.getPathLength() > 1)
- BBInfo.Parent = NodeToInfo[I.getPath(I.getPathLength() - 2)].DFSNum;
- NumToNode.push_back(BB); // NumToNode[n] = V;
- }
- return N;
+ return LastNum;
}
NodePtr eval(NodePtr VIn, unsigned LastLinked) {
@@ -181,31 +179,14 @@ struct SemiNCAInfo {
template <typename NodeType>
void runSemiNCA(DomTreeT &DT, unsigned NumBlocks) {
- unsigned N = 0;
- NumToNode.push_back(nullptr);
-
- bool MultipleRoots = (DT.Roots.size() > 1);
- if (MultipleRoots) {
- auto &BBInfo = NodeToInfo[nullptr];
- BBInfo.DFSNum = BBInfo.Semi = ++N;
- BBInfo.Label = nullptr;
-
- NumToNode.push_back(nullptr); // NumToNode[n] = V;
- }
-
// Step #1: Number blocks in depth-first order and initialize variables used
// in later stages of the algorithm.
- if (DT.isPostDominator()){
- for (unsigned i = 0, e = static_cast<unsigned>(DT.Roots.size());
- i != e; ++i)
- N = runReverseDFS(DT.Roots[i], N);
- } else {
- N = runForwardDFS(DT.Roots[0], N);
- }
+ const unsigned N = doFullDFSWalk(DT, AlwaysDescend);
// It might be that some blocks did not get a DFS number (e.g., blocks of
// infinite loops). In these cases an artificial exit node is required.
- MultipleRoots |= (DT.isPostDominator() && N != NumBlocks);
+ const bool MultipleRoots =
+ DT.Roots.size() > 1 || (DT.isPostDominator() && N != NumBlocks);
// Initialize IDoms to spanning tree parents.
for (unsigned i = 1; i <= N; ++i) {
@@ -221,7 +202,7 @@ struct SemiNCAInfo {
// Initialize the semi dominator to point to the parent node.
WInfo.Semi = WInfo.Parent;
- for (const auto &N : inverse_children<NodeType>(W))
+ for (const auto &N : WInfo.ReverseChildren)
if (NodeToInfo.count(N)) { // Only if this predecessor is reachable!
unsigned SemiU = NodeToInfo[eval(N, i + 1)].Semi;
if (SemiU < WInfo.Semi)
@@ -279,14 +260,27 @@ struct SemiNCAInfo {
}
}
- void doFullDFSWalk(const DomTreeT &DT) {
- NumToNode.push_back(nullptr);
+ template <typename DescendCondition>
+ unsigned doFullDFSWalk(const DomTreeT &DT, DescendCondition DC) {
unsigned Num = 0;
- for (auto *Root : DT.Roots)
- if (!DT.isPostDominator())
- Num = runForwardDFS(Root, Num);
- else
- Num = runReverseDFS(Root, Num);
+ NumToNode.push_back(nullptr);
+
+ if (DT.Roots.size() > 1) {
+ auto &BBInfo = NodeToInfo[nullptr];
+ BBInfo.DFSNum = BBInfo.Semi = ++Num;
+ BBInfo.Label = nullptr;
+
+ NumToNode.push_back(nullptr); // NumToNode[n] = V;
+ }
+
+ if (DT.isPostDominator()) {
+ for (auto *Root : DT.Roots) Num = runDFS<true>(Root, Num, DC, 1);
+ } else {
+ assert(DT.Roots.size() == 1);
+ Num = runDFS<false>(DT.Roots[0], Num, DC, Num);
+ }
+
+ return Num;
}
static void PrintBlockOrNullptr(raw_ostream &O, NodePtr Obj) {
@@ -299,7 +293,7 @@ struct SemiNCAInfo {
// Checks if the tree contains all reachable nodes in the input graph.
bool verifyReachability(const DomTreeT &DT) {
clear();
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, AlwaysDescend);
for (auto &NodeToTN : DT.DomTreeNodes) {
const TreeNodePtr TN = NodeToTN.second.get();
@@ -356,7 +350,7 @@ struct SemiNCAInfo {
// NCD(From, To) == IDom(To) or To.
bool verifyNCD(const DomTreeT &DT) {
clear();
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, AlwaysDescend);
for (auto &BlockToInfo : NodeToInfo) {
auto &Info = BlockToInfo.second;
@@ -440,8 +434,9 @@ struct SemiNCAInfo {
if (!BB || TN->getChildren().empty()) continue;
clear();
- NodeToInfo.insert({BB, {}});
- doFullDFSWalk(DT);
+ doFullDFSWalk(DT, [BB](NodePtr From, NodePtr To) {
+ return From != BB && To != BB;
+ });
for (TreeNodePtr Child : TN->getChildren())
if (NodeToInfo.count(Child->getBlock()) != 0) {
@@ -473,8 +468,10 @@ struct SemiNCAInfo {
const auto &Siblings = TN->getChildren();
for (const TreeNodePtr N : Siblings) {
clear();
- NodeToInfo.insert({N->getBlock(), {}});
- doFullDFSWalk(DT);
+ NodePtr BBN = N->getBlock();
+ doFullDFSWalk(DT, [BBN](NodePtr From, NodePtr To) {
+ return From != BBN && To != BBN;
+ });
for (const TreeNodePtr S : Siblings) {
if (S == N) continue;
diff --git a/include/llvm/Support/ReverseIteration.h b/include/llvm/Support/ReverseIteration.h
new file mode 100644
index 000000000000..cb97b60f06dd
--- /dev/null
+++ b/include/llvm/Support/ReverseIteration.h
@@ -0,0 +1,17 @@
+#ifndef LLVM_SUPPORT_REVERSEITERATION_H
+#define LLVM_SUPPORT_REVERSEITERATION_H
+
+#include "llvm/Config/abi-breaking.h"
+
+namespace llvm {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <class T = void> struct ReverseIterate { static bool value; };
+#if LLVM_ENABLE_REVERSE_ITERATION
+template <class T> bool ReverseIterate<T>::value = true;
+#else
+template <class T> bool ReverseIterate<T>::value = false;
+#endif
+#endif
+}
+
+#endif
diff --git a/include/llvm/Support/UnicodeCharRanges.h b/include/llvm/Support/UnicodeCharRanges.h
index d4d4d8eb84a4..4c655833b396 100644
--- a/include/llvm/Support/UnicodeCharRanges.h
+++ b/include/llvm/Support/UnicodeCharRanges.h
@@ -18,11 +18,11 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#define DEBUG_TYPE "unicode"
+
namespace llvm {
namespace sys {
-#define DEBUG_TYPE "unicode"
-
/// \brief Represents a closed range of Unicode code points [Lower, Upper].
struct UnicodeCharRange {
uint32_t Lower;
@@ -99,10 +99,9 @@ private:
const CharRanges Ranges;
};
-#undef DEBUG_TYPE // "unicode"
-
} // namespace sys
} // namespace llvm
+#undef DEBUG_TYPE // "unicode"
#endif // LLVM_SUPPORT_UNICODECHARRANGES_H
diff --git a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 3a3118139bcb..178b08d7b8b7 100644
--- a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -64,6 +64,7 @@ def : GINodeEquiv<G_FREM, frem>;
def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_FEXP2, fexp2>;
def : GINodeEquiv<G_FLOG2, flog2>;
+def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 2fc3ec996e7f..1843a2eed9bf 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -1545,6 +1545,16 @@ public:
return None;
}
+ /// Return an array that contains the MMO target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the MMO target flags that are
+ /// defined by this method.
+ virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
+ getSerializableMachineMemOperandTargetFlags() const {
+ return None;
+ }
+
/// Determines whether \p Inst is a tail call instruction. Override this
/// method on targets that do not properly set MCID::Return and MCID::Call on
/// tail call instructions.
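A sketch of how a backend might implement the new hook so the MIR printer/parser can name its MMO flags; MyTargetInstrInfo and the flag string are hypothetical, and the surrounding class boilerplate is omitted.

// Sketch: exposing a target MMO flag to MIR serialization by name.
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
MyTargetInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MachineMemOperand::MOTargetFlag1, "my-nontemporal-hint"}};
  return makeArrayRef(TargetFlags);
}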
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 964d6314b127..60a03bdc182d 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -415,7 +415,8 @@ public:
virtual bool mergeStoresAfterLegalization() const { return false; }
/// Returns if it's reasonable to merge stores to MemVT size.
- virtual bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT) const {
+ virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
+ const SelectionDAG &DAG) const {
return true;
}
@@ -2726,6 +2727,18 @@ public:
return true;
}
+ // Return true if it is profitable to combine a BUILD_VECTOR to a TRUNCATE.
+ // Example of such a combine:
+ // v4i32 build_vector((extract_elt V, 0),
+ // (extract_elt V, 2),
+ // (extract_elt V, 4),
+ // (extract_elt V, 6))
+ // -->
+ // v4i32 truncate (bitcast V to v4i64)
+ virtual bool isDesirableToCombineBuildVectorToTruncate() const {
+ return false;
+ }
+
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
@@ -2815,6 +2828,9 @@ public:
// TargetLowering::LowerCall that perform tail call conversions.
bool IsTailCall = false;
+ // Is Call lowering done post SelectionDAG type legalization.
+ bool IsPostTypeLegalization = false;
+
unsigned NumFixedArgs = -1;
CallingConv::ID CallConv = CallingConv::C;
SDValue Callee;
@@ -2937,6 +2953,11 @@ public:
return *this;
}
+ CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
+ IsPostTypeLegalization = Value;
+ return *this;
+ }
+
ArgListTy &getArgs() {
return Args;
}
@@ -3055,6 +3076,13 @@ public:
return Chain;
}
+ /// This callback is used to inspect load/store instructions and add
+ /// target-specific MachineMemOperand flags to them. The default
+ /// implementation does nothing.
+ virtual MachineMemOperand::Flags getMMOFlags(const Instruction &I) const {
+ return MachineMemOperand::MONone;
+ }
+
/// This callback is invoked by the type legalizer to legalize nodes with an
/// illegal operand type but legal result types. It replaces the
/// LowerOperation callback in the type Legalizer. The reason we can not do
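The getMMOFlags hook pairs with the serializable-flag hook in TargetInstrInfo above; a sketch of a target tagging accesses that carry !nontemporal metadata (MyTargetLowering is hypothetical):

// Sketch: attach a target MMO flag to loads/stores with !nontemporal.
MachineMemOperand::Flags
MyTargetLowering::getMMOFlags(const Instruction &I) const {
  if (I.getMetadata("nontemporal"))
    return MachineMemOperand::MOTargetFlag1;
  return MachineMemOperand::MONone;
}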
diff --git a/include/llvm/Transforms/Scalar/GVN.h b/include/llvm/Transforms/Scalar/GVN.h
index 4c585a20021c..f25ab40640df 100644
--- a/include/llvm/Transforms/Scalar/GVN.h
+++ b/include/llvm/Transforms/Scalar/GVN.h
@@ -68,21 +68,6 @@ public:
class ValueTable {
DenseMap<Value *, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
-
- // Expressions is the vector of Expression. ExprIdx is the mapping from
- // value number to the index of Expression in Expressions. We use it
- // instead of a DenseMap because filling such mapping is faster than
- // filling a DenseMap and the compile time is a little better.
- uint32_t nextExprNumber;
- std::vector<Expression> Expressions;
- std::vector<uint32_t> ExprIdx;
- // Value number to PHINode mapping. Used for phi-translate in scalarpre.
- DenseMap<uint32_t, PHINode *> NumberingPhi;
- // Cache for phi-translate in scalarpre.
- typedef DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>
- PhiTranslateMap;
- PhiTranslateMap PhiTranslateTable;
-
AliasAnalysis *AA;
MemoryDependenceResults *MD;
DominatorTree *DT;
@@ -94,10 +79,6 @@ public:
Value *LHS, Value *RHS);
Expression createExtractvalueExpr(ExtractValueInst *EI);
uint32_t lookupOrAddCall(CallInst *C);
- uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVN &Gvn);
- std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
- bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
public:
ValueTable();
@@ -106,11 +87,9 @@ public:
~ValueTable();
uint32_t lookupOrAdd(Value *V);
- uint32_t lookup(Value *V, bool Verify = true) const;
+ uint32_t lookup(Value *V) const;
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
Value *LHS, Value *RHS);
- uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
- uint32_t Num, GVN &Gvn);
bool exists(Value *V) const;
void add(Value *V, uint32_t num);
void clear();
@@ -152,10 +131,6 @@ private:
SmallMapVector<llvm::Value *, llvm::Constant *, 4> ReplaceWithConstMap;
SmallVector<Instruction *, 8> InstrsToErase;
- // Map the block to reversed postorder traversal number. It is used to
- // find back edge easily.
- DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
-
typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
typedef SmallVector<gvn::AvailableValueInBlock, 64> AvailValInBlkVect;
typedef SmallVector<BasicBlock *, 64> UnavailBlkVect;
@@ -239,7 +214,7 @@ private:
bool performPRE(Function &F);
bool performScalarPRE(Instruction *I);
bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
- BasicBlock *Curr, unsigned int ValNo);
+ unsigned int ValNo);
Value *findLeader(const BasicBlock *BB, uint32_t num);
void cleanupGlobalSets();
void verifyRemoved(const Instruction *I) const;
@@ -251,7 +226,6 @@ private:
bool processFoldableCondBr(BranchInst *BI);
void addDeadBlock(BasicBlock *BB);
void assignValNumForDeadCode();
- void assignBlockRPONumber(Function &F);
};
/// Create a legacy GVN pass. This also allows parameterizing whether or not
diff --git a/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
index e4906b709e4b..4554b5cbc644 100644
--- a/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
+++ b/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -17,21 +17,39 @@
namespace llvm {
+class ConstantInt;
class Instruction;
class MemCpyInst;
class MemMoveInst;
class MemSetInst;
+class TargetTransformInfo;
class Value;
/// Emit a loop implementing the semantics of llvm.memcpy with the equivalent
/// arguments at \p InsertBefore.
-void createMemCpyLoop(Instruction *InsertBefore,
- Value *SrcAddr, Value *DstAddr, Value *CopyLen,
- unsigned SrcAlign, unsigned DestAlign,
+void createMemCpyLoop(Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
+ Value *CopyLen, unsigned SrcAlign, unsigned DestAlign,
bool SrcIsVolatile, bool DstIsVolatile);
+/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
+/// a compile-time constant. The loop will be inserted at \p InsertBefore.
+void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, Value *CopyLen,
+ unsigned SrcAlign, unsigned DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
+/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
+/// compile time constant. Loop is inserted at \p InsertBefore.
+void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, ConstantInt *CopyLen,
+ unsigned SrcAlign, unsigned DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
-void expandMemCpyAsLoop(MemCpyInst *MemCpy);
+void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
/// Expand \p MemMove as a loop. \p MemMove is not deleted.
void expandMemMoveAsLoop(MemMoveInst *MemMove);
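Callers of the expansion helpers must now provide TargetTransformInfo; a minimal sketch from a pass that already has TTI at hand (pass boilerplate omitted):

// Sketch: expanding a memcpy intrinsic into an explicit loop with the
// new TTI-aware overload. The helper does not erase the intrinsic itself.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
using namespace llvm;

void lowerMemCpy(Instruction &I, const TargetTransformInfo &TTI) {
  if (auto *Memcpy = dyn_cast<MemCpyInst>(&I)) {
    expandMemCpyAsLoop(Memcpy, TTI);
    Memcpy->eraseFromParent();
  }
}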
diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index b0448fed9f4d..2dd205d8b2af 100644
--- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -22,10 +22,10 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
-namespace llvm {
-
#define DEBUG_TYPE "ssaupdater"
+namespace llvm {
+
class CastInst;
class PHINode;
template<typename T> class SSAUpdaterTraits;
@@ -453,8 +453,8 @@ public:
}
};
-#undef DEBUG_TYPE // "ssaupdater"
+} // end llvm namespace
-} // End llvm namespace
+#undef DEBUG_TYPE // "ssaupdater"
-#endif
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
index dd419e861316..766198bbc5de 100644
--- a/include/llvm/module.modulemap
+++ b/include/llvm/module.modulemap
@@ -23,6 +23,7 @@ module LLVM_Backend {
exclude header "CodeGen/CommandFlags.h"
exclude header "CodeGen/LinkAllAsmWriterComponents.h"
exclude header "CodeGen/LinkAllCodegenComponents.h"
+ exclude header "CodeGen/GlobalISel/InstructionSelectorImpl.h"
// These are intended for (repeated) textual inclusion.
textual header "CodeGen/DIEValue.def"