author    Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
commit    3a0822f094b578157263e04114075ad7df81db41 (patch)
tree      bc48361fe2cd1ca5f93ac01b38b183774468fc79 /lib/CodeGen
parent    85d8b2bbe386bcfe669575d05b61482d7be07e5d (diff)
Vendor import of llvm trunk r240225 (tag: vendor/llvm/llvm-trunk-r240225)
Notes:
    svn path=/vendor/llvm/dist/; revision=284677
    svn path=/vendor/llvm/llvm-trunk-r240225/; revision=284678; tag=vendor/llvm/llvm-trunk-r240225
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/AggressiveAntiDepBreaker.h | 2
-rw-r--r--  lib/CodeGen/AntiDepBreaker.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/AddressPool.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 84
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | 32
-rw-r--r--  lib/CodeGen/AsmPrinter/ByteStreamer.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DIE.cpp | 6
-rw-r--r--  lib/CodeGen/AsmPrinter/DIEHash.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DebugLocEntry.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DebugLocStream.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfAccelTable.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfCompileUnit.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 6
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfExpression.cpp | 37
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfExpression.h | 5
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfFile.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfFile.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfStringPool.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfUnit.cpp | 8
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfUnit.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/EHStreamer.h | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/WinException.cpp | 109
-rw-r--r--  lib/CodeGen/AsmPrinter/WinException.h | 9
-rw-r--r--  lib/CodeGen/AtomicExpandPass.cpp | 4
-rw-r--r--  lib/CodeGen/BranchFolding.cpp | 21
-rw-r--r--  lib/CodeGen/BranchFolding.h | 2
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 3
-rw-r--r--  lib/CodeGen/CallingConvLower.cpp | 28
-rw-r--r--  lib/CodeGen/CodeGen.cpp | 1
-rw-r--r--  lib/CodeGen/CodeGenPrepare.cpp | 51
-rw-r--r--  lib/CodeGen/CoreCLRGC.cpp | 2
-rw-r--r--  lib/CodeGen/CriticalAntiDepBreaker.h | 2
-rw-r--r--  lib/CodeGen/DFAPacketizer.cpp | 2
-rw-r--r--  lib/CodeGen/DeadMachineInstructionElim.cpp | 2
-rw-r--r--  lib/CodeGen/DwarfEHPrepare.cpp | 17
-rw-r--r--  lib/CodeGen/EarlyIfConversion.cpp | 17
-rw-r--r--  lib/CodeGen/EdgeBundles.cpp | 2
-rw-r--r--  lib/CodeGen/ExecutionDepsFix.cpp | 4
-rw-r--r--  lib/CodeGen/FaultMaps.cpp | 114
-rw-r--r--  lib/CodeGen/GCMetadata.cpp | 2
-rw-r--r--  lib/CodeGen/GCRootLowering.cpp | 2
-rw-r--r--  lib/CodeGen/IfConversion.cpp | 2
-rw-r--r--  lib/CodeGen/ImplicitNullChecks.cpp | 261
-rw-r--r--  lib/CodeGen/InlineSpiller.cpp | 4
-rw-r--r--  lib/CodeGen/LLVMBuild.txt | 2
-rw-r--r--  lib/CodeGen/LLVMTargetMachine.cpp | 50
-rw-r--r--  lib/CodeGen/LiveVariables.cpp | 39
-rw-r--r--  lib/CodeGen/MIRParser/MIRParser.cpp | 175
-rw-r--r--  lib/CodeGen/MIRPrinter.cpp | 96
-rw-r--r--  lib/CodeGen/MIRPrinter.h | 33
-rw-r--r--  lib/CodeGen/MIRPrintingPass.cpp | 44
-rw-r--r--  lib/CodeGen/MachineBlockPlacement.cpp | 6
-rw-r--r--  lib/CodeGen/MachineCombiner.cpp | 34
-rw-r--r--  lib/CodeGen/MachineCopyPropagation.cpp | 2
-rw-r--r--  lib/CodeGen/MachineFunction.cpp | 103
-rw-r--r--  lib/CodeGen/MachineFunctionAnalysis.cpp | 8
-rw-r--r--  lib/CodeGen/MachineFunctionPrinterPass.cpp | 2
-rw-r--r--  lib/CodeGen/MachineInstr.cpp | 6
-rw-r--r--  lib/CodeGen/MachineLICM.cpp | 21
-rw-r--r--  lib/CodeGen/MachineModuleInfo.cpp | 19
-rw-r--r--  lib/CodeGen/MachineSSAUpdater.cpp | 2
-rw-r--r--  lib/CodeGen/MachineScheduler.cpp | 12
-rw-r--r--  lib/CodeGen/MachineSink.cpp | 125
-rw-r--r--  lib/CodeGen/MachineTraceMetrics.cpp | 10
-rw-r--r--  lib/CodeGen/MachineVerifier.cpp | 4
-rw-r--r--  lib/CodeGen/OptimizePHIs.cpp | 2
-rw-r--r--  lib/CodeGen/PHIElimination.cpp | 25
-rw-r--r--  lib/CodeGen/Passes.cpp | 11
-rw-r--r--  lib/CodeGen/PeepholeOptimizer.cpp | 2
-rw-r--r--  lib/CodeGen/PostRASchedulerList.cpp | 4
-rw-r--r--  lib/CodeGen/RegAllocFast.cpp | 2
-rw-r--r--  lib/CodeGen/RegisterCoalescer.cpp | 5
-rw-r--r--  lib/CodeGen/RegisterCoalescer.h | 2
-rw-r--r--  lib/CodeGen/RegisterScavenging.cpp | 10
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp | 12
-rw-r--r--  lib/CodeGen/ScheduleDAGPrinter.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 198
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 27
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/SDNodeDbgValue.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 95
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 156
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 5
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 10
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/StatepointLowering.cpp | 187
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp | 5
-rw-r--r--  lib/CodeGen/ShadowStackGCLowering.cpp | 14
-rw-r--r--  lib/CodeGen/SjLjEHPrepare.cpp | 2
-rw-r--r--  lib/CodeGen/Spiller.h | 2
-rw-r--r--  lib/CodeGen/SplitKit.h | 2
-rw-r--r--  lib/CodeGen/StatepointExampleGC.cpp | 2
-rw-r--r--  lib/CodeGen/TailDuplication.cpp | 2
-rw-r--r--  lib/CodeGen/TargetInstrInfo.cpp | 9
-rw-r--r--  lib/CodeGen/UnreachableBlockElim.cpp | 2
-rw-r--r--  lib/CodeGen/VirtRegMap.cpp | 81
-rw-r--r--  lib/CodeGen/WinEHPrepare.cpp | 43
104 files changed, 1747 insertions, 852 deletions
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.h b/lib/CodeGen/AggressiveAntiDepBreaker.h
index 18c8bb591c1c..63d2085148b6 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -174,6 +174,6 @@ class RegisterClassInfo;
RenameOrderType& RenameOrder,
std::map<unsigned, unsigned> &RenameMap);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AntiDepBreaker.h b/lib/CodeGen/AntiDepBreaker.h
index a61a8efa4da0..7985241c6635 100644
--- a/lib/CodeGen/AntiDepBreaker.h
+++ b/lib/CodeGen/AntiDepBreaker.h
@@ -62,6 +62,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/AddressPool.h b/lib/CodeGen/AsmPrinter/AddressPool.h
index 211fc98c7f6f..e0ce3f90bc34 100644
--- a/lib/CodeGen/AsmPrinter/AddressPool.h
+++ b/lib/CodeGen/AsmPrinter/AddressPool.h
@@ -48,5 +48,5 @@ public:
void resetUsedFlag() { HasBeenUsed = false; }
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 2e3b83a09520..95da5887658e 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -151,7 +151,7 @@ void AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
}
StringRef AsmPrinter::getTargetTriple() const {
- return TM.getTargetTriple();
+ return TM.getTargetTriple().str();
}
/// getCurrentSection() - Return the current section we are emitting to.
@@ -172,7 +172,6 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
bool AsmPrinter::doInitialization(Module &M) {
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
- MMI->AnalyzeModule(M);
// Initialize TargetLoweringObjectFile.
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
@@ -222,7 +221,8 @@ bool AsmPrinter::doInitialization(Module &M) {
// We're at the module level. Construct MCSubtarget from the default CPU
// and target triple.
std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
- TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+ TM.getTargetTriple().str(), TM.getTargetCPU(),
+ TM.getTargetFeatureString()));
OutStreamer->AddComment("Start of file scope inline assembly");
OutStreamer->AddBlankLine();
EmitInlineAsm(M.getModuleInlineAsm()+"\n", *STI, TM.Options.MCOptions);
@@ -232,7 +232,7 @@ bool AsmPrinter::doInitialization(Module &M) {
if (MAI->doesSupportDebugInformation()) {
bool skip_dwarf = false;
- if (Triple(TM.getTargetTriple()).isKnownWindowsMSVCEnvironment()) {
+ if (TM.getTargetTriple().isKnownWindowsMSVCEnvironment()) {
Handlers.push_back(HandlerInfo(new WinCodeViewLineTables(this),
DbgTimerName,
CodeViewLineTablesGroupName));
@@ -900,12 +900,11 @@ void AsmPrinter::EmitFunctionBody() {
if (MAI->hasDotTypeDotSizeDirective()) {
// We can get the size as difference between the function label and the
// temp label.
- const MCExpr *SizeExp =
- MCBinaryExpr::createSub(MCSymbolRefExpr::create(CurrentFnEnd, OutContext),
- MCSymbolRefExpr::create(CurrentFnSymForSize,
- OutContext),
- OutContext);
- OutStreamer->emitELFSize(cast<MCSymbolELF>(CurrentFnSym), SizeExp);
+ const MCExpr *SizeExp = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(CurrentFnEnd, OutContext),
+ MCSymbolRefExpr::create(CurrentFnSymForSize, OutContext), OutContext);
+ if (auto Sym = dyn_cast<MCSymbolELF>(CurrentFnSym))
+ OutStreamer->emitELFSize(Sym, SizeExp);
}
for (const HandlerInfo &HI : Handlers) {
@@ -1043,8 +1042,7 @@ bool AsmPrinter::doFinalization(Module &M) {
if (!ModuleFlags.empty())
TLOF.emitModuleFlags(*OutStreamer, ModuleFlags, *Mang, TM);
- Triple TT(TM.getTargetTriple());
- if (TT.isOSBinFormatELF()) {
+ if (TM.getTargetTriple().isOSBinFormatELF()) {
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
// Output stubs for external and common global variables.
@@ -1591,25 +1589,7 @@ void AsmPrinter::EmitInt32(int Value) const {
/// .set if it avoids relocations.
void AsmPrinter::EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Size) const {
- if (!MAI->doesDwarfUseRelocationsAcrossSections())
- if (OutStreamer->emitAbsoluteSymbolDiff(Hi, Lo, Size))
- return;
-
- // Get the Hi-Lo expression.
- const MCExpr *Diff =
- MCBinaryExpr::createSub(MCSymbolRefExpr::create(Hi, OutContext),
- MCSymbolRefExpr::create(Lo, OutContext),
- OutContext);
-
- if (!MAI->doesSetDirectiveSuppressesReloc()) {
- OutStreamer->EmitValue(Diff, Size);
- return;
- }
-
- // Otherwise, emit with .set (aka assignment).
- MCSymbol *SetLabel = createTempSymbol("set");
- OutStreamer->EmitAssignment(SetLabel, Diff);
- OutStreamer->EmitSymbolValue(SetLabel, Size);
+ OutStreamer->emitAbsoluteSymbolDiff(Hi, Lo, Size);
}
/// EmitLabelPlusOffset - Emit something like ".long Label+Offset"
@@ -1811,40 +1791,30 @@ static int isRepeatedByteSequence(const ConstantDataSequential *V) {
/// composed of a repeated sequence of identical bytes and return the
/// byte value. If it is not a repeated sequence, return -1.
static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
-
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- if (CI->getBitWidth() > 64) return -1;
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSizeInBits(V->getType());
+ assert(Size % 8 == 0);
- uint64_t Size =
- TM.getDataLayout()->getTypeAllocSize(V->getType());
- uint64_t Value = CI->getZExtValue();
+ // Extend the element to take zero padding into account.
+ APInt Value = CI->getValue().zextOrSelf(Size);
+ if (!Value.isSplat(8))
+ return -1;
- // Make sure the constant is at least 8 bits long and has a power
- // of 2 bit width. This guarantees the constant bit width is
- // always a multiple of 8 bits, avoiding issues with padding out
- // to Size and other such corner cases.
- if (CI->getBitWidth() < 8 || !isPowerOf2_64(CI->getBitWidth())) return -1;
-
- uint8_t Byte = static_cast<uint8_t>(Value);
-
- for (unsigned i = 1; i < Size; ++i) {
- Value >>= 8;
- if (static_cast<uint8_t>(Value) != Byte) return -1;
- }
- return Byte;
+ return Value.zextOrTrunc(8).getZExtValue();
}
if (const ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
// Make sure all array elements are sequences of the same repeated
// byte.
assert(CA->getNumOperands() != 0 && "Should be a CAZ");
- int Byte = isRepeatedByteSequence(CA->getOperand(0), TM);
- if (Byte == -1) return -1;
-
- for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
- int ThisByte = isRepeatedByteSequence(CA->getOperand(i), TM);
- if (ThisByte == -1) return -1;
- if (Byte != ThisByte) return -1;
- }
+ Constant *Op0 = CA->getOperand(0);
+ int Byte = isRepeatedByteSequence(Op0, TM);
+ if (Byte == -1)
+ return -1;
+
+ // All array elements must be equal.
+ for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i)
+ if (CA->getOperand(i) != Op0)
+ return -1;
return Byte;
}
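
The rewritten ConstantInt path above leans on APInt::isSplat(8): zero-extend the constant to its allocation size so any padding bytes become 0x00, then ask whether every byte equals the lowest one. A standalone sketch of that check, limited to 64-bit values for illustration (the helper name is invented, not from the commit):

#include <cassert>
#include <cstdint>

// Returns the repeated byte, or -1 if the zero-extended value is not a
// splat of a single byte across AllocSizeInBits.
static int repeatedByteOfZExt(uint64_t Value, unsigned AllocSizeInBits) {
  assert(AllocSizeInBits % 8 == 0 && AllocSizeInBits <= 64);
  uint8_t Byte = static_cast<uint8_t>(Value);
  for (unsigned i = 8; i < AllocSizeInBits; i += 8)
    if (static_cast<uint8_t>(Value >> i) != Byte)
      return -1;
  return Byte;
}

The ConstantArray half of the hunk also gets cheaper: because Constants are uniqued, later elements no longer need to recurse individually; comparing their pointers against the first operand suffices.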
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 7dbfddf60691..8ee613bcdb43 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -157,24 +157,20 @@ void AsmPrinter::EmitTTypeReference(const GlobalValue *GV,
OutStreamer->EmitIntValue(0, GetSizeOfEncodedValue(Encoding));
}
-/// EmitSectionOffset - Emit the 4-byte offset of Label from the start of its
-/// section. This can be done with a special directive if the target supports
-/// it (e.g. cygwin) or by emitting it as an offset from a label at the start
-/// of the section.
-///
-/// SectionLabel is a temporary label emitted at the start of the section that
-/// Label lives in.
-void AsmPrinter::emitSectionOffset(const MCSymbol *Label) const {
- // On COFF targets, we have to emit the special .secrel32 directive.
- if (MAI->needsDwarfSectionOffsetDirective()) {
- OutStreamer->EmitCOFFSecRel32(Label);
- return;
- }
+void AsmPrinter::emitDwarfSymbolReference(const MCSymbol *Label,
+ bool ForceOffset) const {
+ if (!ForceOffset) {
+ // On COFF targets, we have to emit the special .secrel32 directive.
+ if (MAI->needsDwarfSectionOffsetDirective()) {
+ OutStreamer->EmitCOFFSecRel32(Label);
+ return;
+ }
- // If the format uses relocations with dwarf, refer to the symbol directly.
- if (MAI->doesDwarfUseRelocationsAcrossSections()) {
- OutStreamer->EmitSymbolValue(Label, 4);
- return;
+ // If the format uses relocations with dwarf, refer to the symbol directly.
+ if (MAI->doesDwarfUseRelocationsAcrossSections()) {
+ OutStreamer->EmitSymbolValue(Label, 4);
+ return;
+ }
}
// Otherwise, emit it as a label difference from the start of the section.
@@ -183,7 +179,7 @@ void AsmPrinter::emitSectionOffset(const MCSymbol *Label) const {
void AsmPrinter::emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
if (MAI->doesDwarfUseRelocationsAcrossSections()) {
- emitSectionOffset(S.getSymbol());
+ emitDwarfSymbolReference(S.getSymbol());
return;
}
diff --git a/lib/CodeGen/AsmPrinter/ByteStreamer.h b/lib/CodeGen/AsmPrinter/ByteStreamer.h
index 0cc829fffc54..7a712a076dd9 100644
--- a/lib/CodeGen/AsmPrinter/ByteStreamer.h
+++ b/lib/CodeGen/AsmPrinter/ByteStreamer.h
@@ -103,6 +103,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp
index fa8449e94c9f..4847de45789b 100644
--- a/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -618,11 +618,7 @@ unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
void DIELocList::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
DwarfDebug *DD = AP->getDwarfDebug();
MCSymbol *Label = DD->getDebugLocs().getList(Index).Label;
-
- if (AP->MAI->doesDwarfUseRelocationsAcrossSections() && !DD->useSplitDwarf())
- AP->emitSectionOffset(Label);
- else
- AP->EmitLabelDifference(Label, Label->getSection().getBeginSymbol(), 4);
+ AP->emitDwarfSymbolReference(Label, /*ForceOffset*/ DD->useSplitDwarf());
}
#ifndef NDEBUG
diff --git a/lib/CodeGen/AsmPrinter/DIEHash.h b/lib/CodeGen/AsmPrinter/DIEHash.h
index 1850e042f924..789e6dd91e01 100644
--- a/lib/CodeGen/AsmPrinter/DIEHash.h
+++ b/lib/CodeGen/AsmPrinter/DIEHash.h
@@ -157,6 +157,6 @@ private:
AsmPrinter *AP;
DenseMap<const DIE *, unsigned> Numbering;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
index 546d1b443781..5d4005018013 100644
--- a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
+++ b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
@@ -55,6 +55,6 @@ public:
void calculateDbgValueHistory(const MachineFunction *MF,
const TargetRegisterInfo *TRI,
DbgValueHistoryMap &Result);
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
index 6a943c64ea22..083228b8fd41 100644
--- a/lib/CodeGen/AsmPrinter/DebugLocEntry.h
+++ b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -175,6 +175,6 @@ inline bool operator<(const DebugLocEntry::Value &A,
B.getExpression()->getBitPieceOffset();
}
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DebugLocStream.h b/lib/CodeGen/AsmPrinter/DebugLocStream.h
index 3001da21b907..1ae385db4a55 100644
--- a/lib/CodeGen/AsmPrinter/DebugLocStream.h
+++ b/lib/CodeGen/AsmPrinter/DebugLocStream.h
@@ -129,5 +129,5 @@ private:
return Entries[EI + 1].CommentOffset - Entries[EI].CommentOffset;
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
index 4d81441f6a72..cc677c260071 100644
--- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
+++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
@@ -252,5 +252,5 @@ public:
void dump() { print(dbgs()); }
#endif
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 689184a651ed..45c56fbb4463 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -817,4 +817,4 @@ bool DwarfCompileUnit::includeMinimalInlineScopes() const {
return getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly ||
(DD->useSplitDwarf() && !Skeleton);
}
-} // end llvm namespace
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index 50e4a54eb3e0..48c302bf9c18 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -231,6 +231,6 @@ public:
const MCSymbol *getBaseAddress() const { return BaseAddress; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 3f6665bd5768..fb3316985b86 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1414,7 +1414,7 @@ void DwarfDebug::emitDebugPubSection(
Asm->EmitInt16(dwarf::DW_PUBNAMES_VERSION);
Asm->OutStreamer->AddComment("Offset of Compilation Unit Info");
- Asm->emitSectionOffset(TheU->getLabelBegin());
+ Asm->emitDwarfSymbolReference(TheU->getLabelBegin());
Asm->OutStreamer->AddComment("Compilation Unit Length");
Asm->EmitInt32(TheU->getLength());
@@ -1562,8 +1562,6 @@ void DwarfDebug::emitDebugLoc() {
Asm->OutStreamer->EmitLabel(List.Label);
const DwarfCompileUnit *CU = List.CU;
for (const auto &Entry : DebugLocs.getEntries(List)) {
- if (Entry.BeginSym == Entry.EndSym)
- continue;
// Set up the range. This range is relative to the entry point of the
// compile unit. This is a hard coded 0 for low_pc when we're emitting
// ranges, or the DW_AT_low_pc on the compile unit otherwise.
@@ -1741,7 +1739,7 @@ void DwarfDebug::emitDebugARanges() {
Asm->OutStreamer->AddComment("DWARF Arange version number");
Asm->EmitInt16(dwarf::DW_ARANGES_VERSION);
Asm->OutStreamer->AddComment("Offset Into Debug Info Section");
- Asm->emitSectionOffset(CU->getLabelBegin());
+ Asm->emitDwarfSymbolReference(CU->getLabelBegin());
Asm->OutStreamer->AddComment("Address Size (in bytes)");
Asm->EmitInt8(PtrSize);
Asm->OutStreamer->AddComment("Segment Size (in bytes)");
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index d56982712d53..a2799b8d6300 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -65,11 +65,6 @@ void DwarfExpression::AddShr(unsigned ShiftBy) {
EmitOp(dwarf::DW_OP_shr);
}
-void DwarfExpression::AddOpStackValue() {
- if (DwarfVersion >= 4)
- EmitOp(dwarf::DW_OP_stack_value);
-}
-
bool DwarfExpression::AddMachineRegIndirect(unsigned MachineReg, int Offset) {
if (isFrameRegister(MachineReg)) {
// If variable offset is based in frame register then use fbreg.
@@ -177,14 +172,16 @@ void DwarfExpression::AddSignedConstant(int Value) {
// value, so the producers and consumers started to rely on heuristics
// to disambiguate the value vs. location status of the expression.
// See PR21176 for more details.
- AddOpStackValue();
+ if (DwarfVersion >= 4)
+ EmitOp(dwarf::DW_OP_stack_value);
}
void DwarfExpression::AddUnsignedConstant(unsigned Value) {
EmitOp(dwarf::DW_OP_constu);
EmitUnsigned(Value);
// cf. comment in DwarfExpression::AddSignedConstant().
- AddOpStackValue();
+ if (DwarfVersion >= 4)
+ EmitOp(dwarf::DW_OP_stack_value);
}
static unsigned getOffsetOrZero(unsigned OffsetInBits,
@@ -215,30 +212,15 @@ bool DwarfExpression::AddMachineRegExpression(const DIExpression *Expr,
getOffsetOrZero(OffsetInBits, PieceOffsetInBits));
}
case dwarf::DW_OP_plus: {
+ // [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset].
auto N = I.getNext();
- unsigned Offset = I->getArg(0);
- // First combine all DW_OP_plus until we hit either a DW_OP_deref or a
- // DW_OP_bit_piece
- while (N != E && N->getOp() == dwarf::DW_OP_plus) {
- Offset += N->getArg(0);
- ++I;
- N = I.getNext();
- }
if (N != E && N->getOp() == dwarf::DW_OP_deref) {
- // [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset].
+ unsigned Offset = I->getArg(0);
ValidReg = AddMachineRegIndirect(MachineReg, Offset);
std::advance(I, 2);
- } else {
- assert ((N == E) || (N->getOp() == dwarf::DW_OP_bit_piece));
- if (Offset == 0) {
- ValidReg = AddMachineRegPiece(MachineReg);
- } else {
- ValidReg = AddMachineRegIndirect(MachineReg, Offset);
- AddOpStackValue();
- }
- ++I;
- }
- break;
+ break;
+ } else
+ ValidReg = AddMachineRegPiece(MachineReg);
}
case dwarf::DW_OP_deref: {
// [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg].
@@ -255,7 +237,6 @@ bool DwarfExpression::AddMachineRegExpression(const DIExpression *Expr,
// Emit remaining elements of the expression.
AddExpression(I, E, PieceOffsetInBits);
-
return true;
}
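
The simplified DW_OP_plus case above keeps only the breg folding and drops the general plus-combining loop. A toy illustration (not the LLVM API) of the two rewrites AddMachineRegExpression still performs on a location expression:

#include <cstdint>
#include <utility>
#include <vector>

enum Op : uint8_t { Reg, Plus, Deref, Breg };
using OpList = std::vector<std::pair<Op, int64_t>>;

// [Reg, Plus K, Deref] -> [Breg K];  [Reg, Deref] -> [Breg 0]
static OpList foldRegDeref(const OpList &In) {
  OpList Out;
  for (size_t i = 0; i < In.size(); ++i) {
    if (In[i].first == Reg && i + 2 < In.size() &&
        In[i + 1].first == Plus && In[i + 2].first == Deref) {
      Out.push_back({Breg, In[i + 1].second}); // base-register + offset
      i += 2;
    } else if (In[i].first == Reg && i + 1 < In.size() &&
               In[i + 1].first == Deref) {
      Out.push_back({Breg, 0}); // plain indirection through the register
      i += 1;
    } else {
      Out.push_back(In[i]);
    }
  }
  return Out;
}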
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h
index f6249fff4253..154d7d9b9645 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -83,9 +83,6 @@ public:
bool AddMachineRegPiece(unsigned MachineReg, unsigned PieceSizeInBits = 0,
unsigned PieceOffsetInBits = 0);
- /// Emit a DW_OP_stack_value
- void AddOpStackValue();
-
/// Emit a signed constant.
void AddSignedConstant(int Value);
/// Emit an unsigned constant.
@@ -134,6 +131,6 @@ public:
void EmitUnsigned(uint64_t Value) override;
bool isFrameRegister(unsigned MachineReg) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
index 5ef333c4cf44..fdefb1df84b6 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -170,4 +170,4 @@ bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
Vars.push_back(Var);
return true;
}
-}
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.h b/lib/CodeGen/AsmPrinter/DwarfFile.h
index 8402027edd6f..22759fdecccf 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.h
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.h
@@ -114,5 +114,5 @@ public:
return DITypeNodeToDieMap.lookup(TypeMD);
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfStringPool.h b/lib/CodeGen/AsmPrinter/DwarfStringPool.h
index 93a168485a54..c10725815351 100644
--- a/lib/CodeGen/AsmPrinter/DwarfStringPool.h
+++ b/lib/CodeGen/AsmPrinter/DwarfStringPool.h
@@ -45,5 +45,5 @@ public:
/// Get a reference to an entry in the string pool.
EntryRef getEntry(AsmPrinter &Asm, StringRef Str);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 907f6706bc6a..f4b15ba053e9 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -931,7 +931,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) {
StringRef PropertyName = Property->getName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
if (Property->getType())
- addType(ElemDie, Property->getType());
+ addType(ElemDie, resolve(Property->getType()));
addSourceLine(ElemDie, Property);
StringRef GetterName = Property->getGetterName();
if (!GetterName.empty())
@@ -1449,10 +1449,8 @@ void DwarfUnit::emitHeader(bool UseOffsets) {
// start of the section. Use a relocatable offset where needed to ensure
// linking doesn't invalidate that offset.
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
- if (!UseOffsets)
- Asm->emitSectionOffset(TLOF.getDwarfAbbrevSection()->getBeginSymbol());
- else
- Asm->EmitInt32(0);
+ Asm->emitDwarfSymbolReference(TLOF.getDwarfAbbrevSection()->getBeginSymbol(),
+ UseOffsets);
Asm->OutStreamer->AddComment("Address Size (in bytes)");
Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.h b/lib/CodeGen/AsmPrinter/DwarfUnit.h
index f56c9b4eb13e..200ddf0f3cbe 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -402,5 +402,5 @@ public:
}
DwarfCompileUnit &getCU() override { return CU; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/EHStreamer.h b/lib/CodeGen/AsmPrinter/EHStreamer.h
index 65973fab6b21..128a8ad39255 100644
--- a/lib/CodeGen/AsmPrinter/EHStreamer.h
+++ b/lib/CodeGen/AsmPrinter/EHStreamer.h
@@ -132,7 +132,7 @@ public:
void beginInstruction(const MachineInstr *MI) override {}
void endInstruction() override {}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
index 535b1f605853..11bfe767a27b 100644
--- a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
+++ b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -378,4 +378,4 @@ void WinCodeViewLineTables::beginInstruction(const MachineInstr *MI) {
return;
maybeRecordLocation(DL, Asm->MF);
}
-}
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/WinException.cpp b/lib/CodeGen/AsmPrinter/WinException.cpp
index f1663503c08e..1ba6060a89f6 100644
--- a/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -50,6 +50,11 @@ WinException::~WinException() {}
/// endModule - Emit all exception information that should come after the
/// content.
void WinException::endModule() {
+ auto &OS = *Asm->OutStreamer;
+ const Module *M = MMI->getModule();
+ for (const Function &F : *M)
+ if (F.hasFnAttribute("safeseh"))
+ OS.EmitCOFFSafeSEH(Asm->getSymbol(&F));
}
void WinException::beginFunction(const MachineFunction *MF) {
@@ -144,7 +149,7 @@ void WinException::endFunction(const MachineFunction *MF) {
if (Per == EHPersonality::MSVC_Win64SEH)
emitCSpecificHandlerTable();
else if (Per == EHPersonality::MSVC_X86SEH)
- emitCSpecificHandlerTable(); // FIXME
+ emitExceptHandlerTable(MF);
else if (Per == EHPersonality::MSVC_CXX)
emitCXXFrameHandler3Table(MF);
else
@@ -444,7 +449,7 @@ void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
Asm->OutContext.getOrCreateParentFrameOffsetSymbol(
GlobalValue::getRealLinkageName(HT.Handler->getName()));
const MCSymbolRefExpr *ParentFrameOffsetRef = MCSymbolRefExpr::create(
- ParentFrameOffset, MCSymbolRefExpr::VK_None, Asm->OutContext);
+ ParentFrameOffset, Asm->OutContext);
OS.EmitValue(ParentFrameOffsetRef, 4); // ParentFrameOffset
}
}
@@ -541,3 +546,103 @@ void WinException::extendIP2StateTable(const MachineFunction *MF,
}
}
}
+
+/// Emit the language-specific data that _except_handler3 and 4 expect. This is
+/// functionally equivalent to the __C_specific_handler table, except it is
+/// indexed by state number instead of IP.
+void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
+ MCStreamer &OS = *Asm->OutStreamer;
+
+ // Define the EH registration node offset label in terms of its frameescape
+ // label. The WinEHStatePass ensures that the registration node is passed to
+ // frameescape. This allows SEH filter functions to access the
+ // EXCEPTION_POINTERS field, which is filled in by the _except_handlerN.
+ const Function *F = MF->getFunction();
+ WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(F);
+ assert(FuncInfo.EHRegNodeEscapeIndex != INT_MAX &&
+ "no EH reg node frameescape index");
+ StringRef FLinkageName = GlobalValue::getRealLinkageName(F->getName());
+ MCSymbol *ParentFrameOffset =
+ Asm->OutContext.getOrCreateParentFrameOffsetSymbol(FLinkageName);
+ MCSymbol *FrameAllocSym = Asm->OutContext.getOrCreateFrameAllocSymbol(
+ FLinkageName, FuncInfo.EHRegNodeEscapeIndex);
+ const MCSymbolRefExpr *FrameAllocSymRef =
+ MCSymbolRefExpr::create(FrameAllocSym, Asm->OutContext);
+ OS.EmitAssignment(ParentFrameOffset, FrameAllocSymRef);
+
+ // Emit the __ehtable label that we use for llvm.x86.seh.lsda.
+ MCSymbol *LSDALabel = Asm->OutContext.getOrCreateLSDASymbol(FLinkageName);
+ OS.EmitLabel(LSDALabel);
+
+ const Function *Per = MMI->getPersonality();
+ StringRef PerName = Per->getName();
+ int BaseState = -1;
+ if (PerName == "_except_handler4") {
+ // The LSDA for _except_handler4 starts with this struct, followed by the
+ // scope table:
+ //
+ // struct EH4ScopeTable {
+ // int32_t GSCookieOffset;
+ // int32_t GSCookieXOROffset;
+ // int32_t EHCookieOffset;
+ // int32_t EHCookieXOROffset;
+ // ScopeTableEntry ScopeRecord[];
+ // };
+ //
+ // Only the EHCookieOffset field appears to vary, and it appears to be the
+ // offset from the final saved SP value to the retaddr.
+ OS.EmitIntValue(-2, 4);
+ OS.EmitIntValue(0, 4);
+ // FIXME: Calculate.
+ OS.EmitIntValue(9999, 4);
+ OS.EmitIntValue(0, 4);
+ BaseState = -2;
+ }
+
+ // Build a list of pointers to LandingPadInfos and then sort by WinEHState.
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ SmallVector<const LandingPadInfo *, 4> LPads;
+ LPads.reserve((PadInfos.size()));
+ for (const LandingPadInfo &LPInfo : PadInfos)
+ LPads.push_back(&LPInfo);
+ std::sort(LPads.begin(), LPads.end(),
+ [](const LandingPadInfo *L, const LandingPadInfo *R) {
+ return L->WinEHState < R->WinEHState;
+ });
+
+ // For each action in each lpad, emit one of these:
+ // struct ScopeTableEntry {
+ // int32_t EnclosingLevel;
+ // int32_t (__cdecl *Filter)();
+ // void *HandlerOrFinally;
+ // };
+ //
+ // The "outermost" action will use BaseState as its enclosing level. Each
+ // other action will refer to the previous state as its enclosing level.
+ int CurState = 0;
+ for (const LandingPadInfo *LPInfo : LPads) {
+ int EnclosingLevel = BaseState;
+ assert(CurState + int(LPInfo->SEHHandlers.size()) - 1 ==
+ LPInfo->WinEHState &&
+ "gaps in the SEH scope table");
+ for (auto I = LPInfo->SEHHandlers.rbegin(), E = LPInfo->SEHHandlers.rend();
+ I != E; ++I) {
+ const SEHHandler &Handler = *I;
+ const BlockAddress *BA = Handler.RecoverBA;
+ const Function *F = Handler.FilterOrFinally;
+ assert(F && "cannot catch all in 32-bit SEH without filter function");
+ const MCExpr *FilterOrNull =
+ create32bitRef(BA ? Asm->getSymbol(F) : nullptr);
+ const MCExpr *ExceptOrFinally = create32bitRef(
+ BA ? Asm->GetBlockAddressSymbol(BA) : Asm->getSymbol(F));
+
+ OS.EmitIntValue(EnclosingLevel, 4);
+ OS.EmitValue(FilterOrNull, 4);
+ OS.EmitValue(ExceptOrFinally, 4);
+
+ // The next state unwinds to this state.
+ EnclosingLevel = CurState;
+ CurState++;
+ }
+ }
+}
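
For orientation, here is how nested 32-bit SEH scopes in source relate to the ScopeTableEntry records the function above emits. An illustrative MSVC-only snippet, not from the commit; the state numbers are how such code would typically be numbered:

// Compile with cl.exe; 1 == EXCEPTION_EXECUTE_HANDLER.
static void Cleanup(void) {}
static void Recover(void) {}

void Demo(void) {
  __try {                       // state 0: EnclosingLevel = BaseState
    __try {                     // state 1: EnclosingLevel = 0
      *(volatile int *)0 = 1;   // faults; unwinds through both states
    } __finally {               // state 1's record: Filter == null,
      Cleanup();                // HandlerOrFinally -> this funclet
    }
  } __except (1) {              // state 0's record: Filter -> filter thunk,
    Recover();                  // HandlerOrFinally -> this __except block
  }
}

Each record's EnclosingLevel points one scope outward, which is exactly the chaining the CurState/EnclosingLevel loop above produces.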
diff --git a/lib/CodeGen/AsmPrinter/WinException.h b/lib/CodeGen/AsmPrinter/WinException.h
index 478899b79da9..bbff3c24cffc 100644
--- a/lib/CodeGen/AsmPrinter/WinException.h
+++ b/lib/CodeGen/AsmPrinter/WinException.h
@@ -38,8 +38,15 @@ class WinException : public EHStreamer {
void emitCSpecificHandlerTable();
+ /// Emit the EH table data for 32-bit and 64-bit functions using
+ /// the __CxxFrameHandler3 personality.
void emitCXXFrameHandler3Table(const MachineFunction *MF);
+ /// Emit the EH table data for _except_handler3 and _except_handler4
+ /// personality functions. These are only used on 32-bit and do not use CFI
+ /// tables.
+ void emitExceptHandlerTable(const MachineFunction *MF);
+
void extendIP2StateTable(const MachineFunction *MF, const Function *ParentF,
WinEHFuncInfo &FuncInfo);
@@ -63,7 +70,7 @@ public:
/// Gather and emit post-function exception information.
void endFunction(const MachineFunction *) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index fa17108b2a8e..0bb0fa34e314 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -55,7 +55,7 @@ namespace {
bool isIdempotentRMW(AtomicRMWInst *AI);
bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
-}
+} // namespace
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
@@ -464,7 +464,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
Value *ShouldStore =
Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
- // If the the cmpxchg doesn't actually need any ordering when it fails, we can
+ // If the cmpxchg doesn't actually need any ordering when it fails, we can
// jump straight past that fence instruction (if it exists).
Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index b8d9a1a29edc..e7b7f5b939e3 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -79,7 +79,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char BranchFolderPass::ID = 0;
char &llvm::BranchFolderPassID = BranchFolderPass::ID;
@@ -273,8 +273,12 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
// Merge in bits from the operand if easy.
unsigned OperandHash = 0;
switch (Op.getType()) {
- case MachineOperand::MO_Register: OperandHash = Op.getReg(); break;
- case MachineOperand::MO_Immediate: OperandHash = Op.getImm(); break;
+ case MachineOperand::MO_Register:
+ OperandHash = Op.getReg();
+ break;
+ case MachineOperand::MO_Immediate:
+ OperandHash = Op.getImm();
+ break;
case MachineOperand::MO_MachineBasicBlock:
OperandHash = Op.getMBB()->getNumber();
break;
@@ -289,10 +293,11 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
// pull in the offset.
OperandHash = Op.getOffset();
break;
- default: break;
+ default:
+ break;
}
- Hash += ((OperandHash << 3) | Op.getType()) << (i&31);
+ Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
}
return Hash;
}
@@ -301,13 +306,13 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
static unsigned HashEndOfMBB(const MachineBasicBlock *MBB) {
MachineBasicBlock::const_iterator I = MBB->end();
if (I == MBB->begin())
- return 0; // Empty MBB.
+ return 0; // Empty MBB.
--I;
// Skip debug info so it will not affect codegen.
while (I->isDebugValue()) {
- if (I==MBB->begin())
- return 0; // MBB empty except for debug info.
+ if (I == MBB->begin())
+ return 0; // MBB empty except for debug info.
--I;
}
diff --git a/lib/CodeGen/BranchFolding.h b/lib/CodeGen/BranchFolding.h
index 3653a2ccd623..d1b17dd31aab 100644
--- a/lib/CodeGen/BranchFolding.h
+++ b/lib/CodeGen/BranchFolding.h
@@ -142,6 +142,6 @@ namespace llvm {
bool HoistCommonCode(MachineFunction &MF);
bool HoistCommonCodeInSuccs(MachineBasicBlock *MBB);
};
-}
+} // namespace llvm
#endif /* LLVM_CODEGEN_BRANCHFOLDING_HPP */
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 6d2af9003509..a992c5e00b21 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -20,12 +20,14 @@ add_llvm_library(LLVMCodeGen
ExecutionDepsFix.cpp
ExpandISelPseudos.cpp
ExpandPostRAPseudos.cpp
+ FaultMaps.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCRootLowering.cpp
GCStrategy.cpp
GlobalMerge.cpp
IfConversion.cpp
+ ImplicitNullChecks.cpp
InlineSpiller.cpp
InterferenceCache.cpp
IntrinsicLowering.cpp
@@ -71,6 +73,7 @@ add_llvm_library(LLVMCodeGen
MachineSink.cpp
MachineTraceMetrics.cpp
MachineVerifier.cpp
+ MIRPrinter.cpp
MIRPrintingPass.cpp
OcamlGC.cpp
OptimizePHIs.cpp
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 034ffb34b9cc..fb29b1db7a43 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -37,9 +37,9 @@ CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
UsedRegs.resize((TRI.getNumRegs()+31)/32);
}
-// HandleByVal - Allocate space on the stack large enough to pass an argument
-// by value. The size and alignment information of the argument is encoded in
-// its parameter attribute.
+/// Allocate space on the stack large enough to pass an argument by value.
+/// The size and alignment information of the argument is encoded in
+/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign,
@@ -57,13 +57,13 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
-/// MarkAllocated - Mark a register and all of its aliases as allocated.
+/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
UsedRegs[*AI/32] |= 1 << (*AI&31);
}
-/// AnalyzeFormalArguments - Analyze an array of argument values,
+/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -83,8 +83,8 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
}
}
-/// CheckReturn - Analyze the return values of a function, returning true if
-/// the return can be performed without sret-demotion, and false otherwise.
+/// Analyze the return values of a function, returning true if the return can
+/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
@@ -97,7 +97,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
return true;
}
-/// AnalyzeReturn - Analyze the returned values of a return,
+/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
@@ -115,7 +115,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
}
}
-/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
@@ -133,8 +133,7 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
}
}
-/// AnalyzeCallOperands - Same as above except it takes vectors of types
-/// and argument flags.
+/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) {
@@ -152,8 +151,8 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
}
}
-/// AnalyzeCallResult - Analyze the return values of a call,
-/// incorporating info about the passed values into this state.
+/// Analyze the return values of a call, incorporating info about the passed
+/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -169,8 +168,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
}
}
-/// AnalyzeCallResult - Same as above except it's specialized for calls which
-/// produce a single value.
+/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp
index 2c6eaf35a257..155c5ecec772 100644
--- a/lib/CodeGen/CodeGen.cpp
+++ b/lib/CodeGen/CodeGen.cpp
@@ -42,6 +42,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeMachineBlockPlacementPass(Registry);
initializeMachineBlockPlacementStatsPass(Registry);
initializeMachineCSEPass(Registry);
+ initializeImplicitNullChecksPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
initializeMachineDominatorTreePass(Registry);
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 6a814038c688..247c45bd4366 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -135,8 +135,8 @@ class TypePromotionTransaction;
/// multiple load/stores of the same address.
ValueMap<Value*, Value*> SunkAddrs;
- /// Keeps track of all truncates inserted for the current function.
- SetOfInstrs InsertedTruncsSet;
+ /// Keeps track of all instructions inserted for the current function.
+ SetOfInstrs InsertedInsts;
/// Keeps track of the type of the related instruction before their
/// promotion for the current function.
InstrToOrigTy PromotedInsts;
@@ -189,7 +189,7 @@ class TypePromotionTransaction;
bool splitBranchCondition(Function &F);
bool simplifyOffsetableRelocate(Instruction &I);
};
-}
+} // namespace
char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
@@ -205,7 +205,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool EverMadeChange = false;
// Clear per function information.
- InsertedTruncsSet.clear();
+ InsertedInsts.clear();
PromotedInsts.clear();
ModifiedDT = false;
@@ -1406,6 +1406,9 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
return false;
// Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
ExtVal->moveBefore(CI);
+ // Mark this instruction as "inserted by CGP", so that other
+ // optimizations don't touch it.
+ InsertedInsts.insert(ExtVal);
return true;
}
}
@@ -2107,8 +2110,8 @@ class AddressingModeMatcher {
/// part of the return value of this addressing mode matching stuff.
ExtAddrMode &AddrMode;
- /// The truncate instruction inserted by other CodeGenPrepare optimizations.
- const SetOfInstrs &InsertedTruncs;
+ /// The instructions inserted by other CodeGenPrepare optimizations.
+ const SetOfInstrs &InsertedInsts;
/// A map from the instructions to their type before promotion.
InstrToOrigTy &PromotedInsts;
/// The ongoing transaction where every action should be registered.
@@ -2122,14 +2125,14 @@ class AddressingModeMatcher {
AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
const TargetMachine &TM, Type *AT, unsigned AS,
Instruction *MI, ExtAddrMode &AM,
- const SetOfInstrs &InsertedTruncs,
+ const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT)
: AddrModeInsts(AMI), TM(TM),
TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
->getTargetLowering()),
AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
- InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
+ InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT) {
IgnoreProfitability = false;
}
public:
@@ -2137,8 +2140,7 @@ public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
/// give an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
- /// \p InsertedTruncs The truncate instruction inserted by other
- /// CodeGenPrepare
+ /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
/// optimizations.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p The ongoing transaction where every action should be registered.
@@ -2146,13 +2148,13 @@ public:
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
const TargetMachine &TM,
- const SetOfInstrs &InsertedTruncs,
+ const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT) {
ExtAddrMode Result;
bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
- MemoryInst, Result, InsertedTruncs,
+ MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT).MatchAddr(V, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
@@ -2361,12 +2363,12 @@ public:
/// action to promote the operand of \p Ext instead of using Ext.
/// \return NULL if no promotable action is possible with the current
/// sign extension.
- /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
- /// the others CodeGenPrepare optimizations. This information is important
+ /// \p InsertedInsts keeps track of all the instructions inserted by the
+ /// other CodeGenPrepare optimizations. This information is important
/// because we do not want to promote these instructions as CodeGenPrepare
/// will reinsert them later. Thus creating an infinite loop: create/remove.
/// \p PromotedInsts maps the instructions to their type before promotion.
- static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedTruncs,
+ static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI,
const InstrToOrigTy &PromotedInsts);
};
@@ -2439,7 +2441,7 @@ bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
}
TypePromotionHelper::Action TypePromotionHelper::getAction(
- Instruction *Ext, const SetOfInstrs &InsertedTruncs,
+ Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
"Unexpected instruction type");
@@ -2455,7 +2457,7 @@ TypePromotionHelper::Action TypePromotionHelper::getAction(
// Do not promote if the operand has been added by codegenprepare.
// Otherwise, it means we are undoing an optimization that is likely to be
// redone, thus causing potential infinite loop.
- if (isa<TruncInst>(ExtOpnd) && InsertedTruncs.count(ExtOpnd))
+ if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
return nullptr;
// SExt or Trunc instructions.
@@ -2839,7 +2841,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
// Try to move this ext out of the way of the addressing mode.
// Ask for a method for doing so.
TypePromotionHelper::Action TPH =
- TypePromotionHelper::getAction(Ext, InsertedTruncs, TLI, PromotedInsts);
+ TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
if (!TPH)
return false;
@@ -3157,7 +3159,7 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
- MemoryInst, Result, InsertedTruncs,
+ MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
@@ -3240,7 +3242,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
SmallVector<Instruction*, 16> NewAddrModeInsts;
ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
- InsertedTruncsSet, PromotedInsts, TPT);
+ InsertedInsts, PromotedInsts, TPT);
// This check is broken into two cases with very similar code to avoid using
// getNumUses() as much as possible. Some values have a lot of uses, so
@@ -3652,7 +3654,7 @@ bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
continue;
// Get the action to perform the promotion.
TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
- I, InsertedTruncsSet, *TLI, PromotedInsts);
+ I, InsertedInsts, *TLI, PromotedInsts);
// Check if we can promote.
if (!TPH)
continue;
@@ -3828,7 +3830,7 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
if (!InsertedTrunc) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
- InsertedTruncsSet.insert(InsertedTrunc);
+ InsertedInsts.insert(InsertedTrunc);
}
// Replace a use of the {s|z}ext source with a use of the result.
@@ -4357,6 +4359,11 @@ bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) {
}
bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
+ // Bail out if we inserted the instruction to prevent optimizations from
+ // stepping on each other's toes.
+ if (InsertedInsts.count(I))
+ return false;
+
if (PHINode *P = dyn_cast<PHINode>(I)) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
diff --git a/lib/CodeGen/CoreCLRGC.cpp b/lib/CodeGen/CoreCLRGC.cpp
index 28c97ba71bd9..0816d1488c28 100644
--- a/lib/CodeGen/CoreCLRGC.cpp
+++ b/lib/CodeGen/CoreCLRGC.cpp
@@ -45,7 +45,7 @@ public:
return (1 == PT->getAddressSpace());
}
};
-}
+} // namespace
static GCRegistry::Add<CoreCLRGC> X("coreclr", "CoreCLR-compatible GC");
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.h b/lib/CodeGen/CriticalAntiDepBreaker.h
index af011a0a65f6..1ca530087c44 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -103,6 +103,6 @@ class TargetRegisterInfo;
const TargetRegisterClass *RC,
SmallVectorImpl<unsigned> &Forbid);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp
index 0a188c0935ad..02cdb5086de2 100644
--- a/lib/CodeGen/DFAPacketizer.cpp
+++ b/lib/CodeGen/DFAPacketizer.cpp
@@ -110,7 +110,7 @@ public:
// Schedule - Actual scheduling work.
void schedule() override;
};
-}
+} // namespace llvm
DefaultVLIWScheduler::DefaultVLIWScheduler(MachineFunction &MF,
MachineLoopInfo &MLI, bool IsPostRA)
diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp
index 963d573ea7f0..efaf47c40c82 100644
--- a/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -45,7 +45,7 @@ namespace {
private:
bool isDead(const MachineInstr *MI) const;
};
-}
+} // namespace
char DeadMachineInstructionElim::ID = 0;
char &llvm::DeadMachineInstructionElimID = DeadMachineInstructionElim::ID;
diff --git a/lib/CodeGen/DwarfEHPrepare.cpp b/lib/CodeGen/DwarfEHPrepare.cpp
index 42656fb08db1..e019dfbc8f7d 100644
--- a/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/lib/CodeGen/DwarfEHPrepare.cpp
@@ -181,27 +181,22 @@ size_t DwarfEHPrepare::pruneUnreachableResumes(
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
SmallVector<ResumeInst*, 16> Resumes;
SmallVector<LandingPadInst*, 16> CleanupLPads;
- bool FoundLP = false;
for (BasicBlock &BB : Fn) {
if (auto *RI = dyn_cast<ResumeInst>(BB.getTerminator()))
Resumes.push_back(RI);
- if (auto *LP = BB.getLandingPadInst()) {
+ if (auto *LP = BB.getLandingPadInst())
if (LP->isCleanup())
CleanupLPads.push_back(LP);
- // Check the personality on the first landingpad. Don't do anything if
- // it's for MSVC.
- if (!FoundLP) {
- FoundLP = true;
- EHPersonality Pers = classifyEHPersonality(LP->getPersonalityFn());
- if (isMSVCEHPersonality(Pers))
- return false;
- }
- }
}
if (Resumes.empty())
return false;
+ // Check the personality, don't do anything if it's for MSVC.
+ EHPersonality Pers = classifyEHPersonality(Fn.getPersonalityFn());
+ if (isMSVCEHPersonality(Pers))
+ return false;
+
LLVMContext &Ctx = Fn.getContext();
size_t ResumesLeft = pruneUnreachableResumes(Fn, Resumes, CleanupLPads);
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index d3687b98b344..fbc4d97c4987 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -479,11 +479,20 @@ void SSAIfConv::rewritePHIOperands() {
// Convert all PHIs to select instructions inserted before FirstTerm.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
+ unsigned DstReg = 0;
+
DEBUG(dbgs() << "If-converting " << *PI.PHI);
- unsigned PHIDst = PI.PHI->getOperand(0).getReg();
- unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
- TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ if (PI.TReg == PI.FReg) {
+ // We do not need the select instruction if both incoming values are
+ // equal.
+ DstReg = PI.TReg;
+ } else {
+ unsigned PHIDst = PI.PHI->getOperand(0).getReg();
+ DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
+ TII->insertSelect(*Head, FirstTerm, HeadDL,
+ DstReg, Cond, PI.TReg, PI.FReg);
+ DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ }
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
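
The new branch above skips select emission when both PHI inputs are the same virtual register. In source-level terms (a sketch, not LLVM code):

// cond ? v : v is just v, so no select instruction is needed.
static int ifConvertedPHI(bool Cond, int TVal, int FVal) {
  if (TVal == FVal)
    return TVal;               // forward the common value; no select
  return Cond ? TVal : FVal;   // otherwise materialize the select
}

Note the comparison is on register numbers, so this catches only trivially identical inputs, not values that happen to be equal dynamically.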
diff --git a/lib/CodeGen/EdgeBundles.cpp b/lib/CodeGen/EdgeBundles.cpp
index aea7c31ba316..f43b2f1264d2 100644
--- a/lib/CodeGen/EdgeBundles.cpp
+++ b/lib/CodeGen/EdgeBundles.cpp
@@ -89,7 +89,7 @@ raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G,
O << "}\n";
return O;
}
-}
+} // namespace llvm
/// view - Visualize the annotated bipartite CFG with Graphviz.
void EdgeBundles::view() const {
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index 5b09cf1a0fd7..dd508b3e5e32 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -110,7 +110,7 @@ struct DomainValue {
Instrs.clear();
}
};
-}
+} // namespace
namespace {
/// Information about a live register.
@@ -201,7 +201,7 @@ private:
bool shouldBreakDependence(MachineInstr*, unsigned OpIdx, unsigned Pref);
void processUndefReads(MachineBasicBlock*);
};
-}
+} // namespace
char ExeDepsFix::ID = 0;
diff --git a/lib/CodeGen/FaultMaps.cpp b/lib/CodeGen/FaultMaps.cpp
new file mode 100644
index 000000000000..0512ff95d1bf
--- /dev/null
+++ b/lib/CodeGen/FaultMaps.cpp
@@ -0,0 +1,114 @@
+//===---------------------------- FaultMaps.cpp ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/FaultMaps.h"
+
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "faultmaps"
+
+static const int FaultMapVersion = 1;
+const char *FaultMaps::WFMP = "Fault Maps: ";
+
+FaultMaps::FaultMaps(AsmPrinter &AP) : AP(AP) {}
+
+void FaultMaps::recordFaultingOp(FaultKind FaultTy,
+ const MCSymbol *HandlerLabel) {
+ MCContext &OutContext = AP.OutStreamer->getContext();
+ MCSymbol *FaultingLabel = OutContext.createTempSymbol();
+
+ AP.OutStreamer->EmitLabel(FaultingLabel);
+
+ const MCExpr *FaultingOffset = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(FaultingLabel, OutContext),
+ MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
+
+ const MCExpr *HandlerOffset = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(HandlerLabel, OutContext),
+ MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
+
+ FunctionInfos[AP.CurrentFnSym].emplace_back(FaultTy, FaultingOffset,
+ HandlerOffset);
+}
+
+void FaultMaps::serializeToFaultMapSection() {
+ if (FunctionInfos.empty())
+ return;
+
+ MCContext &OutContext = AP.OutStreamer->getContext();
+ MCStreamer &OS = *AP.OutStreamer;
+
+ // Create the section.
+ MCSection *FaultMapSection =
+ OutContext.getObjectFileInfo()->getFaultMapSection();
+ OS.SwitchSection(FaultMapSection);
+
+ // Emit a dummy symbol to force section inclusion.
+ OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_FaultMaps")));
+
+ DEBUG(dbgs() << "********** Fault Map Output **********\n");
+
+ // Header
+ OS.EmitIntValue(FaultMapVersion, 1); // Version.
+ OS.EmitIntValue(0, 1); // Reserved.
+ OS.EmitIntValue(0, 2); // Reserved.
+
+ DEBUG(dbgs() << WFMP << "#functions = " << FunctionInfos.size() << "\n");
+ OS.EmitIntValue(FunctionInfos.size(), 4);
+
+ DEBUG(dbgs() << WFMP << "functions:\n");
+
+ for (const auto &FFI : FunctionInfos)
+ emitFunctionInfo(FFI.first, FFI.second);
+}
+
+void FaultMaps::emitFunctionInfo(const MCSymbol *FnLabel,
+ const FunctionFaultInfos &FFI) {
+ MCStreamer &OS = *AP.OutStreamer;
+
+ DEBUG(dbgs() << WFMP << " function addr: " << *FnLabel << "\n");
+ OS.EmitSymbolValue(FnLabel, 8);
+
+ DEBUG(dbgs() << WFMP << " #faulting PCs: " << FFI.size() << "\n");
+ OS.EmitIntValue(FFI.size(), 4);
+
+ OS.EmitIntValue(0, 4); // Reserved
+
+ for (auto &Fault : FFI) {
+ DEBUG(dbgs() << WFMP << " fault type: "
+ << faultTypeToString(Fault.Kind) << "\n");
+ OS.EmitIntValue(Fault.Kind, 4);
+
+ DEBUG(dbgs() << WFMP << " faulting PC offset: "
+ << *Fault.FaultingOffsetExpr << "\n");
+ OS.EmitValue(Fault.FaultingOffsetExpr, 4);
+
+ DEBUG(dbgs() << WFMP << " fault handler PC offset: "
+ << *Fault.HandlerOffsetExpr << "\n");
+ OS.EmitValue(Fault.HandlerOffsetExpr, 4);
+ }
+}
+
+
+const char *FaultMaps::faultTypeToString(FaultMaps::FaultKind FT) {
+ switch (FT) {
+ default:
+ llvm_unreachable("unhandled fault type!");
+
+ case FaultMaps::FaultingLoad:
+ return "FaultingLoad";
+ }
+}
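
For reference, the byte layout serializeToFaultMapSection() and emitFunctionInfo() produce is: a 4-byte header (version byte plus three reserved bytes), a 4-byte function count, then per function an 8-byte address, a 4-byte fault count, a 4-byte reserved word, and one 12-byte (kind, faulting-PC offset, handler-PC offset) triple per fault. A minimal reader-side sketch, illustration only (not part of the patch), assuming a little-endian target and a well-formed section:

#include <cstdint>
#include <cstring>
#include <vector>

struct FaultRecord {
  uint32_t Kind;           // FaultMaps::FaultKind; only FaultingLoad for now.
  uint32_t FaultingOffset; // Offset of the faulting PC from function start.
  uint32_t HandlerOffset;  // Offset of the handler PC from function start.
};

struct FunctionRecord {
  uint64_t FunctionAddr;
  std::vector<FaultRecord> Faults;
};

// Walk the section bytes exactly as the emitters above wrote them. No bounds
// checking; illustration only.
static std::vector<FunctionRecord> readFaultMaps(const uint8_t *P) {
  auto Read32 = [&P]() -> uint32_t {
    uint32_t V; std::memcpy(&V, P, 4); P += 4; return V;
  };
  auto Read64 = [&P]() -> uint64_t {
    uint64_t V; std::memcpy(&V, P, 8); P += 8; return V;
  };
  P += 4; // Header: version (1 byte) + reserved (1 + 2 bytes).
  uint32_t NumFunctions = Read32();
  std::vector<FunctionRecord> Result;
  for (uint32_t I = 0; I != NumFunctions; ++I) {
    FunctionRecord FR;
    FR.FunctionAddr = Read64();
    uint32_t NumFaults = Read32();
    Read32(); // Reserved.
    // Braced initialization guarantees left-to-right evaluation here.
    for (uint32_t J = 0; J != NumFaults; ++J)
      FR.Faults.push_back({Read32(), Read32(), Read32()});
    Result.push_back(std::move(FR));
  }
  return Result;
}
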
diff --git a/lib/CodeGen/GCMetadata.cpp b/lib/CodeGen/GCMetadata.cpp
index c8116a453d2d..cba7f5fda5c3 100644
--- a/lib/CodeGen/GCMetadata.cpp
+++ b/lib/CodeGen/GCMetadata.cpp
@@ -38,7 +38,7 @@ public:
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
};
-}
+} // namespace
INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
"Create Garbage Collector Module Metadata", false, false)
diff --git a/lib/CodeGen/GCRootLowering.cpp b/lib/CodeGen/GCRootLowering.cpp
index d8edd7e4063f..fcef3226ed79 100644
--- a/lib/CodeGen/GCRootLowering.cpp
+++ b/lib/CodeGen/GCRootLowering.cpp
@@ -76,7 +76,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
};
-}
+} // namespace
// -----------------------------------------------------------------------------
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index e861ceb2a664..963dfe74742e 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -264,7 +264,7 @@ namespace {
};
char IfConverter::ID = 0;
-}
+} // namespace
char &llvm::IfConverterID = IfConverter::ID;
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
new file mode 100644
index 000000000000..b1176ce184cb
--- /dev/null
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -0,0 +1,261 @@
+//===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass turns explicit null checks of the form
+//
+// test %r10, %r10
+// je throw_npe
+// movl (%r10), %esi
+// ...
+//
+// to
+//
+// faulting_load_op("movl (%r10), %esi", throw_npe)
+// ...
+//
+// With the help of a runtime that understands the .fault_maps section,
+// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
+// a page fault.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> PageSize("imp-null-check-page-size",
+ cl::desc("The page size of the target in "
+ "bytes"),
+ cl::init(4096));
+
+namespace {
+
+class ImplicitNullChecks : public MachineFunctionPass {
+ /// Represents one null check that can be made implicit.
+ struct NullCheck {
+ // The memory operation the null check can be folded into.
+ MachineInstr *MemOperation;
+
+ // The instruction actually doing the null check (Ptr != 0).
+ MachineInstr *CheckOperation;
+
+ // The block the check resides in.
+ MachineBasicBlock *CheckBlock;
+
+ // The block branched to if the pointer is non-null.
+ MachineBasicBlock *NotNullSucc;
+
+ // The block branched to if the pointer is null.
+ MachineBasicBlock *NullSucc;
+
+ NullCheck()
+ : MemOperation(), CheckOperation(), CheckBlock(), NotNullSucc(),
+ NullSucc() {}
+
+ explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
+ MachineBasicBlock *checkBlock,
+ MachineBasicBlock *notNullSucc,
+ MachineBasicBlock *nullSucc)
+ : MemOperation(memOperation), CheckOperation(checkOperation),
+ CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc) {
+ }
+ };
+
+ const TargetInstrInfo *TII = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ MachineModuleInfo *MMI = nullptr;
+
+ bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
+ SmallVectorImpl<NullCheck> &NullCheckList);
+ MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel);
+ void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
+
+public:
+ static char ID;
+
+ ImplicitNullChecks() : MachineFunctionPass(ID) {
+ initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // namespace
+
+bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getRegInfo().getTargetRegisterInfo();
+ MMI = &MF.getMMI();
+
+ SmallVector<NullCheck, 16> NullCheckList;
+
+ for (auto &MBB : MF)
+ analyzeBlockForNullChecks(MBB, NullCheckList);
+
+ if (!NullCheckList.empty())
+ rewriteNullChecks(NullCheckList);
+
+ return !NullCheckList.empty();
+}
+
+/// Analyze MBB to check if its terminating branch can be turned into an
+/// implicit null check. If so, append a description of that null check to
+/// NullCheckList and return true; otherwise return false.
+bool ImplicitNullChecks::analyzeBlockForNullChecks(
+ MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
+ typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
+
+ MachineBranchPredicate MBP;
+
+ if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
+ return false;
+
+ // Is the predicate comparing an integer to zero?
+ if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
+ (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
+ MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
+ return false;
+
+ // If we cannot erase the test instruction itself, then making the null check
+ // implicit does not buy us much.
+ if (!MBP.SingleUseCondition)
+ return false;
+
+ MachineBasicBlock *NotNullSucc, *NullSucc;
+
+ if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
+ NotNullSucc = MBP.TrueDest;
+ NullSucc = MBP.FalseDest;
+ } else {
+ NotNullSucc = MBP.FalseDest;
+ NullSucc = MBP.TrueDest;
+ }
+
+ // We handle the simplest case for now. We can potentially do better by using
+ // the machine dominator tree.
+ if (NotNullSucc->pred_size() != 1)
+ return false;
+
+ // Starting with a code fragment like:
+ //
+ // test %RAX, %RAX
+ // jne LblNotNull
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+ // LblNotNull:
+ // Def = Load (%RAX + <offset>)
+ // ...
+ //
+ //
+ // we want to end up with
+ //
+ // Def = TrappingLoad (%RAX + <offset>), LblNull
+ // jmp LblNotNull ;; explicit or fallthrough
+ //
+ // LblNotNull:
+ // ...
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+
+ unsigned PointerReg = MBP.LHS.getReg();
+ MachineInstr *MemOp = &*NotNullSucc->begin();
+ unsigned BaseReg, Offset;
+ if (TII->getMemOpBaseRegImmOfs(MemOp, BaseReg, Offset, TRI))
+ if (MemOp->mayLoad() && !MemOp->isPredicable() && BaseReg == PointerReg &&
+ Offset < PageSize && MemOp->getDesc().getNumDefs() == 1) {
+ NullCheckList.emplace_back(MemOp, MBP.ConditionDef, &MBB, NotNullSucc,
+ NullSucc);
+ return true;
+ }
+
+ return false;
+}
+
+/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
+/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
+/// (defining the same register), and branches to HandlerLabel if the load
+/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
+ MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel) {
+ DebugLoc DL;
+ unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+ assert(NumDefs == 1 && "other cases unhandled!");
+ (void)NumDefs;
+
+ unsigned DefReg = LoadMI->defs().begin()->getReg();
+ assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+ "expected exactly one def!");
+
+ auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
+ .addSym(HandlerLabel)
+ .addImm(LoadMI->getOpcode());
+
+ for (auto &MO : LoadMI->uses())
+ MIB.addOperand(MO);
+
+ MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+
+ return MIB;
+}
+
+/// Rewrite the null checks in NullCheckList into implicit null checks.
+void ImplicitNullChecks::rewriteNullChecks(
+ ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
+ DebugLoc DL;
+
+ for (auto &NC : NullCheckList) {
+ MCSymbol *HandlerLabel = MMI->getContext().createTempSymbol();
+
+ // Remove the conditional branch dependent on the null check.
+ unsigned BranchesRemoved = TII->RemoveBranch(*NC.CheckBlock);
+ (void)BranchesRemoved;
+ assert(BranchesRemoved > 0 && "expected at least one branch!");
+
+    // Insert a faulting load where the conditional branch was originally. The
+    // check we performed earlier ensures that this bit of code motion is
+    // legal. We do not touch the successors list for any basic block since we
+    // haven't changed the control flow; we've just made it implicit.
+ insertFaultingLoad(NC.MemOperation, NC.CheckBlock, HandlerLabel);
+ NC.MemOperation->removeFromParent();
+ NC.CheckOperation->eraseFromParent();
+
+ // Insert an *unconditional* branch to not-null successor.
+ TII->InsertBranch(*NC.CheckBlock, NC.NotNullSucc, nullptr, /*Cond=*/None,
+ DL);
+
+ // Emit the HandlerLabel as an EH_LABEL.
+ BuildMI(*NC.NullSucc, NC.NullSucc->begin(), DL,
+ TII->get(TargetOpcode::EH_LABEL)).addSym(HandlerLabel);
+ }
+}
+
+char ImplicitNullChecks::ID = 0;
+char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
+INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
+INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 9989f233d09c..48c95c9b691f 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -181,7 +181,7 @@ private:
void spillAroundUses(unsigned Reg);
void spillAll();
};
-}
+} // namespace
namespace llvm {
@@ -194,7 +194,7 @@ Spiller *createInlineSpiller(MachineFunctionPass &pass,
return new InlineSpiller(pass, mf, vrm);
}
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// Snippets
diff --git a/lib/CodeGen/LLVMBuild.txt b/lib/CodeGen/LLVMBuild.txt
index 05905d04dabf..18ed77607c6a 100644
--- a/lib/CodeGen/LLVMBuild.txt
+++ b/lib/CodeGen/LLVMBuild.txt
@@ -22,4 +22,4 @@ subdirectories = AsmPrinter SelectionDAG MIRParser
type = Library
name = CodeGen
parent = Libraries
-required_libraries = Analysis Core MC Scalar Support Target TransformUtils
+required_libraries = Analysis Core Instrumentation MC Scalar Support Target TransformUtils
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index ff5205801bc4..b486bdc91453 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -43,16 +43,17 @@ EnableFastISelOption("fast-isel", cl::Hidden,
cl::desc("Enable the \"fast\" instruction selector"));
void LLVMTargetMachine::initAsmInfo() {
- MRI = TheTarget.createMCRegInfo(getTargetTriple());
+ MRI = TheTarget.createMCRegInfo(getTargetTriple().str());
MII = TheTarget.createMCInstrInfo();
// FIXME: Having an MCSubtargetInfo on the target machine is a hack due
// to some backends having subtarget feature dependent module level
// code generation. This is similar to the hack in the AsmPrinter for
// module level assembly etc.
- STI = TheTarget.createMCSubtargetInfo(getTargetTriple(), getTargetCPU(),
+ STI = TheTarget.createMCSubtargetInfo(getTargetTriple().str(), getTargetCPU(),
getTargetFeatureString());
- MCAsmInfo *TmpAsmInfo = TheTarget.createMCAsmInfo(*MRI, getTargetTriple());
+ MCAsmInfo *TmpAsmInfo =
+ TheTarget.createMCAsmInfo(*MRI, getTargetTriple().str());
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
// we'll crash later.
@@ -72,12 +73,12 @@ void LLVMTargetMachine::initAsmInfo() {
LLVMTargetMachine::LLVMTargetMachine(const Target &T,
StringRef DataLayoutString,
- StringRef Triple, StringRef CPU,
+ const Triple &TT, StringRef CPU,
StringRef FS, TargetOptions Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : TargetMachine(T, DataLayoutString, Triple, CPU, FS, Options) {
- CodeGenInfo = T.createMCCodeGenInfo(Triple, RM, CM, OL);
+ : TargetMachine(T, DataLayoutString, TT, CPU, FS, Options) {
+ CodeGenInfo = T.createMCCodeGenInfo(TT.str(), RM, CM, OL);
}
TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
@@ -87,11 +88,11 @@ TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
}
/// addPassesToX helper drives creation and initialization of TargetPassConfig.
-static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
- PassManagerBase &PM,
- bool DisableVerify,
- AnalysisID StartAfter,
- AnalysisID StopAfter) {
+static MCContext *
+addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
+ bool DisableVerify, AnalysisID StartAfter,
+ AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer = nullptr) {
// Add internal analysis passes from the target machine.
PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
@@ -121,7 +122,7 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
PM.add(MMI);
// Set up a MachineFunction for the rest of CodeGen to work on.
- PM.add(new MachineFunctionAnalysis(*TM));
+ PM.add(new MachineFunctionAnalysis(*TM, MFInitializer));
// Enable FastISel with -fast, but allow that to be overridden.
if (EnableFastISelOption == cl::BOU_TRUE ||
@@ -142,10 +143,11 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
bool LLVMTargetMachine::addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
- bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter) {
+ bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
- StartAfter, StopAfter);
+ MCContext *Context = addPassesToGenerateCode(
+ this, PM, DisableVerify, StartAfter, StopAfter, MFInitializer);
if (!Context)
return true;
@@ -167,15 +169,15 @@ bool LLVMTargetMachine::addPassesToEmitFile(
switch (FileType) {
case CGFT_AssemblyFile: {
MCInstPrinter *InstPrinter = getTarget().createMCInstPrinter(
- Triple(getTargetTriple()), MAI.getAssemblerDialect(), MAI, MII, MRI);
+ getTargetTriple(), MAI.getAssemblerDialect(), MAI, MII, MRI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = nullptr;
if (Options.MCOptions.ShowMCEncoding)
MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
MCStreamer *S = getTarget().createAsmStreamer(
*Context, std::move(FOut), Options.MCOptions.AsmVerbose,
@@ -188,15 +190,15 @@ bool LLVMTargetMachine::addPassesToEmitFile(
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
// Don't waste memory on names of temp labels.
Context->setUseNamesOnTempLabels(false);
- Triple T(getTargetTriple());
+ Triple T(getTargetTriple().str());
AsmStreamer.reset(getTarget().createMCObjectStreamer(
T, *Context, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
/*DWARFMustBeAtTheEnd*/ true));
@@ -241,12 +243,12 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
const MCRegisterInfo &MRI = *getMCRegisterInfo();
MCCodeEmitter *MCE =
getTarget().createMCCodeEmitter(*getMCInstrInfo(), MRI, *Ctx);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
- Triple T(getTargetTriple());
+ const Triple &T = getTargetTriple();
const MCSubtargetInfo &STI = *getMCSubtargetInfo();
std::unique_ptr<MCStreamer> AsmStreamer(getTarget().createMCObjectStreamer(
T, *Ctx, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp
index 11deb813dde8..b355393e76f7 100644
--- a/lib/CodeGen/LiveVariables.cpp
+++ b/lib/CodeGen/LiveVariables.cpp
@@ -738,45 +738,22 @@ bool LiveVariables::VarInfo::isLiveIn(const MachineBasicBlock &MBB,
bool LiveVariables::isLiveOut(unsigned Reg, const MachineBasicBlock &MBB) {
LiveVariables::VarInfo &VI = getVarInfo(Reg);
+ SmallPtrSet<const MachineBasicBlock *, 8> Kills;
+  for (MachineInstr *KillMI : VI.Kills)
+    Kills.insert(KillMI->getParent());
+
// Loop over all of the successors of the basic block, checking to see if
// the value is either live in the block, or if it is killed in the block.
- SmallVector<MachineBasicBlock*, 8> OpSuccBlocks;
- for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
- E = MBB.succ_end(); SI != E; ++SI) {
- MachineBasicBlock *SuccMBB = *SI;
-
+ for (const MachineBasicBlock *SuccMBB : MBB.successors()) {
// Is it alive in this successor?
unsigned SuccIdx = SuccMBB->getNumber();
if (VI.AliveBlocks.test(SuccIdx))
return true;
- OpSuccBlocks.push_back(SuccMBB);
+ // Or is it live because there is a use in a successor that kills it?
+ if (Kills.count(SuccMBB))
+ return true;
}
- // Check to see if this value is live because there is a use in a successor
- // that kills it.
- switch (OpSuccBlocks.size()) {
- case 1: {
- MachineBasicBlock *SuccMBB = OpSuccBlocks[0];
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (VI.Kills[i]->getParent() == SuccMBB)
- return true;
- break;
- }
- case 2: {
- MachineBasicBlock *SuccMBB1 = OpSuccBlocks[0], *SuccMBB2 = OpSuccBlocks[1];
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (VI.Kills[i]->getParent() == SuccMBB1 ||
- VI.Kills[i]->getParent() == SuccMBB2)
- return true;
- break;
- }
- default:
- std::sort(OpSuccBlocks.begin(), OpSuccBlocks.end());
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (std::binary_search(OpSuccBlocks.begin(), OpSuccBlocks.end(),
- VI.Kills[i]->getParent()))
- return true;
- }
return false;
}
diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp
index 7a51b3881afc..1fef3f6dcb34 100644
--- a/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -14,10 +14,17 @@
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
@@ -27,7 +34,7 @@
using namespace llvm;
-namespace {
+namespace llvm {
/// This class implements the parsing of LLVM IR that's embedded inside a MIR
/// file.
@@ -35,29 +42,56 @@ class MIRParserImpl {
SourceMgr SM;
StringRef Filename;
LLVMContext &Context;
+ StringMap<std::unique_ptr<yaml::MachineFunction>> Functions;
public:
MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents, StringRef Filename,
LLVMContext &Context);
+ void reportDiagnostic(const SMDiagnostic &Diag);
+
+  /// Report an error with the given message at an unknown location.
+ ///
+ /// Always returns true.
+ bool error(const Twine &Message);
+
/// Try to parse the optional LLVM module and the machine functions in the MIR
/// file.
///
/// Return null if an error occurred.
- std::unique_ptr<Module> parse(SMDiagnostic &Error);
+ std::unique_ptr<Module> parse();
/// Parse the machine function in the current YAML document.
///
+ /// \param NoLLVMIR - set to true when the MIR file doesn't have LLVM IR.
+ /// A dummy IR function is created and inserted into the given module when
+ /// this parameter is true.
+ ///
+ /// Return true if an error occurred.
+ bool parseMachineFunction(yaml::Input &In, Module &M, bool NoLLVMIR);
+
+ /// Initialize the machine function to the state that's described in the MIR
+ /// file.
+ ///
+  /// Return true if an error occurred.
+ bool initializeMachineFunction(MachineFunction &MF);
+
+  /// Initialize the machine basic block using its YAML representation.
+ ///
/// Return true if an error occurred.
- bool parseMachineFunction(yaml::Input &In);
+ bool initializeMachineBasicBlock(MachineBasicBlock &MBB,
+ const yaml::MachineBasicBlock &YamlMBB);
private:
/// Return a MIR diagnostic converted from an LLVM assembly diagnostic.
SMDiagnostic diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
SMRange SourceRange);
+
+ /// Create an empty function with the given name.
+ void createDummyFunction(StringRef Name, Module &M);
};
-} // end anonymous namespace
+} // end namespace llvm
MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
StringRef Filename, LLVMContext &Context)
@@ -65,30 +99,54 @@ MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
SM.AddNewSourceBuffer(std::move(Contents), SMLoc());
}
+bool MIRParserImpl::error(const Twine &Message) {
+ Context.diagnose(DiagnosticInfoMIRParser(
+ DS_Error, SMDiagnostic(Filename, SourceMgr::DK_Error, Message.str())));
+ return true;
+}
+
+void MIRParserImpl::reportDiagnostic(const SMDiagnostic &Diag) {
+ DiagnosticSeverity Kind;
+ switch (Diag.getKind()) {
+ case SourceMgr::DK_Error:
+ Kind = DS_Error;
+ break;
+ case SourceMgr::DK_Warning:
+ Kind = DS_Warning;
+ break;
+ case SourceMgr::DK_Note:
+ Kind = DS_Note;
+ break;
+ }
+ Context.diagnose(DiagnosticInfoMIRParser(Kind, Diag));
+}
+
static void handleYAMLDiag(const SMDiagnostic &Diag, void *Context) {
- *reinterpret_cast<SMDiagnostic *>(Context) = Diag;
+ reinterpret_cast<MIRParserImpl *>(Context)->reportDiagnostic(Diag);
}
-std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
+std::unique_ptr<Module> MIRParserImpl::parse() {
yaml::Input In(SM.getMemoryBuffer(SM.getMainFileID())->getBuffer(),
- /*Ctxt=*/nullptr, handleYAMLDiag, &Error);
+ /*Ctxt=*/nullptr, handleYAMLDiag, this);
if (!In.setCurrentDocument()) {
- if (!Error.getMessage().empty())
+ if (In.error())
return nullptr;
// Create an empty module when the MIR file is empty.
return llvm::make_unique<Module>(Filename, Context);
}
std::unique_ptr<Module> M;
+ bool NoLLVMIR = false;
  // Parse the block scalar manually so that we can return a unique pointer
  // without having to go through YAML traits.
if (const auto *BSN =
dyn_cast_or_null<yaml::BlockScalarNode>(In.getCurrentNode())) {
+ SMDiagnostic Error;
M = parseAssembly(MemoryBufferRef(BSN->getValue(), Filename), Error,
Context);
if (!M) {
- Error = diagFromLLVMAssemblyDiag(Error, BSN->getSourceRange());
+ reportDiagnostic(diagFromLLVMAssemblyDiag(Error, BSN->getSourceRange()));
return M;
}
In.nextDocument();
@@ -97,11 +155,12 @@ std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
} else {
    // Create a new, empty module.
M = llvm::make_unique<Module>(Filename, Context);
+ NoLLVMIR = true;
}
// Parse the machine functions.
do {
- if (parseMachineFunction(In))
+ if (parseMachineFunction(In, *M, NoLLVMIR))
return nullptr;
In.nextDocument();
} while (In.setCurrentDocument());
@@ -109,13 +168,68 @@ std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
return M;
}
-bool MIRParserImpl::parseMachineFunction(yaml::Input &In) {
- yaml::MachineFunction MF;
- yaml::yamlize(In, MF, false);
+bool MIRParserImpl::parseMachineFunction(yaml::Input &In, Module &M,
+ bool NoLLVMIR) {
+ auto MF = llvm::make_unique<yaml::MachineFunction>();
+ yaml::yamlize(In, *MF, false);
if (In.error())
return true;
- // TODO: Initialize the real machine function with the state in the yaml
- // machine function later on.
+ auto FunctionName = MF->Name;
+ if (Functions.find(FunctionName) != Functions.end())
+ return error(Twine("redefinition of machine function '") + FunctionName +
+ "'");
+ Functions.insert(std::make_pair(FunctionName, std::move(MF)));
+ if (NoLLVMIR)
+ createDummyFunction(FunctionName, M);
+ else if (!M.getFunction(FunctionName))
+ return error(Twine("function '") + FunctionName +
+ "' isn't defined in the provided LLVM IR");
+ return false;
+}
+
+void MIRParserImpl::createDummyFunction(StringRef Name, Module &M) {
+ auto &Context = M.getContext();
+ Function *F = cast<Function>(M.getOrInsertFunction(
+ Name, FunctionType::get(Type::getVoidTy(Context), false)));
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
+ new UnreachableInst(Context, BB);
+}
+
+bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
+ auto It = Functions.find(MF.getName());
+ if (It == Functions.end())
+ return error(Twine("no machine function information for function '") +
+ MF.getName() + "' in the MIR file");
+ // TODO: Recreate the machine function.
+ const yaml::MachineFunction &YamlMF = *It->getValue();
+ if (YamlMF.Alignment)
+ MF.setAlignment(YamlMF.Alignment);
+ MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
+ MF.setHasInlineAsm(YamlMF.HasInlineAsm);
+ const auto &F = *MF.getFunction();
+ for (const auto &YamlMBB : YamlMF.BasicBlocks) {
+ const BasicBlock *BB = nullptr;
+ if (!YamlMBB.Name.empty()) {
+ BB = dyn_cast_or_null<BasicBlock>(
+ F.getValueSymbolTable().lookup(YamlMBB.Name));
+ if (!BB)
+ return error(Twine("basic block '") + YamlMBB.Name +
+ "' is not defined in the function '" + MF.getName() + "'");
+ }
+ auto *MBB = MF.CreateMachineBasicBlock(BB);
+ MF.insert(MF.end(), MBB);
+ if (initializeMachineBasicBlock(*MBB, YamlMBB))
+ return true;
+ }
+ return false;
+}
+
+bool MIRParserImpl::initializeMachineBasicBlock(
+ MachineBasicBlock &MBB, const yaml::MachineBasicBlock &YamlMBB) {
+ MBB.setAlignment(YamlMBB.Alignment);
+ if (YamlMBB.AddressTaken)
+ MBB.setHasAddressTaken();
+ MBB.setIsLandingPad(YamlMBB.IsLandingPad);
return false;
}
@@ -150,22 +264,33 @@ SMDiagnostic MIRParserImpl::diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
Error.getFixIts());
}
-std::unique_ptr<Module> llvm::parseMIRFile(StringRef Filename,
- SMDiagnostic &Error,
- LLVMContext &Context) {
+MIRParser::MIRParser(std::unique_ptr<MIRParserImpl> Impl)
+ : Impl(std::move(Impl)) {}
+
+MIRParser::~MIRParser() {}
+
+std::unique_ptr<Module> MIRParser::parseLLVMModule() { return Impl->parse(); }
+
+bool MIRParser::initializeMachineFunction(MachineFunction &MF) {
+ return Impl->initializeMachineFunction(MF);
+}
+
+std::unique_ptr<MIRParser> llvm::createMIRParserFromFile(StringRef Filename,
+ SMDiagnostic &Error,
+ LLVMContext &Context) {
auto FileOrErr = MemoryBuffer::getFile(Filename);
if (std::error_code EC = FileOrErr.getError()) {
Error = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + EC.message());
- return std::unique_ptr<Module>();
+ return nullptr;
}
- return parseMIR(std::move(FileOrErr.get()), Error, Context);
+ return createMIRParser(std::move(FileOrErr.get()), Context);
}
-std::unique_ptr<Module> llvm::parseMIR(std::unique_ptr<MemoryBuffer> Contents,
- SMDiagnostic &Error,
- LLVMContext &Context) {
+std::unique_ptr<MIRParser>
+llvm::createMIRParser(std::unique_ptr<MemoryBuffer> Contents,
+ LLVMContext &Context) {
auto Filename = Contents->getBufferIdentifier();
- MIRParserImpl Parser(std::move(Contents), Filename, Context);
- return Parser.parse(Error);
+ return llvm::make_unique<MIRParser>(
+ llvm::make_unique<MIRParserImpl>(std::move(Contents), Filename, Context));
}
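
For orientation, a caller-side sketch using only the entry points this file defines (driver plumbing elided; the error behavior noted is the part visible in this patch):

#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include <memory>

using namespace llvm;

static std::unique_ptr<Module> loadMIR(StringRef Filename, LLVMContext &Ctx,
                                       std::unique_ptr<MIRParser> &Parser) {
  // Error is only set when the file cannot be opened; YAML and embedded IR
  // problems are reported through Ctx's diagnostic handler instead.
  SMDiagnostic Error;
  Parser = createMIRParserFromFile(Filename, Error, Ctx);
  if (!Parser)
    return nullptr;
  // Parses the embedded LLVM IR block, or creates dummy IR functions when
  // the file contains machine functions only.
  return Parser->parseLLVMModule();
}

// Later, once codegen has created a MachineFunction MF for a function in the
// module, its serialized state is applied with:
//   Parser->initializeMachineFunction(MF);
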
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
new file mode 100644
index 000000000000..bbf163a759ef
--- /dev/null
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -0,0 +1,96 @@
+//===- MIRPrinter.cpp - MIR serialization format printer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class that prints out the LLVM IR and machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MIRPrinter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/YAMLTraits.h"
+
+using namespace llvm;
+
+namespace {
+
+/// This class prints out the machine functions using the MIR serialization
+/// format.
+class MIRPrinter {
+ raw_ostream &OS;
+
+public:
+ MIRPrinter(raw_ostream &OS) : OS(OS) {}
+
+ void print(const MachineFunction &MF);
+
+ void convert(yaml::MachineBasicBlock &YamlMBB, const MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace yaml {
+
+/// This struct serializes the LLVM IR module.
+template <> struct BlockScalarTraits<Module> {
+ static void output(const Module &Mod, void *Ctxt, raw_ostream &OS) {
+ Mod.print(OS, nullptr);
+ }
+ static StringRef input(StringRef Str, void *Ctxt, Module &Mod) {
+ llvm_unreachable("LLVM Module is supposed to be parsed separately");
+ return "";
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+void MIRPrinter::print(const MachineFunction &MF) {
+ yaml::MachineFunction YamlMF;
+ YamlMF.Name = MF.getName();
+ YamlMF.Alignment = MF.getAlignment();
+ YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
+ YamlMF.HasInlineAsm = MF.hasInlineAsm();
+ for (const auto &MBB : MF) {
+ yaml::MachineBasicBlock YamlMBB;
+ convert(YamlMBB, MBB);
+ YamlMF.BasicBlocks.push_back(YamlMBB);
+ }
+ yaml::Output Out(OS);
+ Out << YamlMF;
+}
+
+void MIRPrinter::convert(yaml::MachineBasicBlock &YamlMBB,
+ const MachineBasicBlock &MBB) {
+ // TODO: Serialize unnamed BB references.
+ if (const auto *BB = MBB.getBasicBlock())
+ YamlMBB.Name = BB->hasName() ? BB->getName() : "<unnamed bb>";
+ else
+ YamlMBB.Name = "";
+ YamlMBB.Alignment = MBB.getAlignment();
+ YamlMBB.AddressTaken = MBB.hasAddressTaken();
+ YamlMBB.IsLandingPad = MBB.isLandingPad();
+}
+
+void llvm::printMIR(raw_ostream &OS, const Module &M) {
+ yaml::Output Out(OS);
+ Out << const_cast<Module &>(M);
+}
+
+void llvm::printMIR(raw_ostream &OS, const MachineFunction &MF) {
+ MIRPrinter Printer(OS);
+ Printer.print(MF);
+}
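
A small usage sketch, mirroring what MIRPrintingPass is rewritten to do in the following MIRPrintingPass.cpp hunk: the IR module is printed first as one YAML block-scalar document, followed by one YAML document per machine function.

#include "MIRPrinter.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Illustration only; assumes the caller has collected the machine functions.
static void emitMIR(raw_ostream &OS, const Module &M,
                    ArrayRef<const MachineFunction *> MFs) {
  printMIR(OS, M); // The LLVM IR, via BlockScalarTraits<Module> above.
  for (const MachineFunction *MF : MFs)
    printMIR(OS, *MF); // One YAML document per machine function.
}
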
diff --git a/lib/CodeGen/MIRPrinter.h b/lib/CodeGen/MIRPrinter.h
new file mode 100644
index 000000000000..16aa9038b6b2
--- /dev/null
+++ b/lib/CodeGen/MIRPrinter.h
@@ -0,0 +1,33 @@
+//===- MIRPrinter.h - MIR serialization format printer --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the functions that print out the LLVM IR and the machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_MIRPRINTER_H
+#define LLVM_LIB_CODEGEN_MIRPRINTER_H
+
+namespace llvm {
+
+class MachineFunction;
+class Module;
+class raw_ostream;
+
+/// Print LLVM IR using the MIR serialization format to the given output stream.
+void printMIR(raw_ostream &OS, const Module &M);
+
+/// Print a machine function using the MIR serialization format to the given
+/// output stream.
+void printMIR(raw_ostream &OS, const MachineFunction &MF);
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/CodeGen/MIRPrintingPass.cpp b/lib/CodeGen/MIRPrintingPass.cpp
index 5e0f4cdcbfde..13d61e65d7e0 100644
--- a/lib/CodeGen/MIRPrintingPass.cpp
+++ b/lib/CodeGen/MIRPrintingPass.cpp
@@ -12,54 +12,17 @@
//
//===----------------------------------------------------------------------===//
+#include "MIRPrinter.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
-#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/YAMLTraits.h"
using namespace llvm;
-namespace llvm {
-namespace yaml {
-
-/// This struct serializes the LLVM IR module.
-template <> struct BlockScalarTraits<Module> {
- static void output(const Module &Mod, void *Ctxt, raw_ostream &OS) {
- Mod.print(OS, nullptr);
- }
- static StringRef input(StringRef Str, void *Ctxt, Module &Mod) {
- llvm_unreachable("LLVM Module is supposed to be parsed separately");
- return "";
- }
-};
-
-} // end namespace yaml
-} // end namespace llvm
-
namespace {
-/// This class prints out the machine functions using the MIR serialization
-/// format.
-class MIRPrinter {
- raw_ostream &OS;
-
-public:
- MIRPrinter(raw_ostream &OS) : OS(OS) {}
-
- void print(const MachineFunction &MF);
-};
-
-void MIRPrinter::print(const MachineFunction &MF) {
- yaml::MachineFunction YamlMF;
- YamlMF.Name = MF.getName();
- yaml::Output Out(OS);
- Out << YamlMF;
-}
-
/// This pass prints out the LLVM IR to an output stream using the MIR
/// serialization format.
struct MIRPrintingPass : public MachineFunctionPass {
@@ -80,14 +43,13 @@ struct MIRPrintingPass : public MachineFunctionPass {
virtual bool runOnMachineFunction(MachineFunction &MF) override {
std::string Str;
raw_string_ostream StrOS(Str);
- MIRPrinter(StrOS).print(MF);
+ printMIR(StrOS, MF);
MachineFunctions.append(StrOS.str());
return false;
}
virtual bool doFinalization(Module &M) override {
- yaml::Output Out(OS);
- Out << M;
+ printMIR(OS, M);
OS << MachineFunctions;
return false;
}
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 2969bad4ff98..141990bbe87d 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -179,7 +179,7 @@ public:
/// in-loop predecessors of this chain.
unsigned LoopPredecessors;
};
-}
+} // namespace
namespace {
class MachineBlockPlacement : public MachineFunctionPass {
@@ -267,7 +267,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char MachineBlockPlacement::ID = 0;
char &llvm::MachineBlockPlacementID = MachineBlockPlacement::ID;
@@ -1185,7 +1185,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char MachineBlockPlacementStats::ID = 0;
char &llvm::MachineBlockPlacementStatsID = MachineBlockPlacementStats::ID;
diff --git a/lib/CodeGen/MachineCombiner.cpp b/lib/CodeGen/MachineCombiner.cpp
index a4bc77edb753..5019e8eef19b 100644
--- a/lib/CodeGen/MachineCombiner.cpp
+++ b/lib/CodeGen/MachineCombiner.cpp
@@ -78,7 +78,7 @@ private:
void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
};
-}
+} // namespace
char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;
@@ -223,14 +223,14 @@ bool MachineCombiner::preservesCriticalPathLen(
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
- // NewRoot is the last instruction in the \p InsInstrs vector
- // Get depth and latency of NewRoot
+ // NewRoot is the last instruction in the \p InsInstrs vector.
+ // Get depth and latency of NewRoot.
unsigned NewRootIdx = InsInstrs.size() - 1;
MachineInstr *NewRoot = InsInstrs[NewRootIdx];
unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
- // Get depth, latency and slack of Root
+ // Get depth, latency and slack of Root.
unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
unsigned RootSlack = BlockTrace.getInstrSlack(Root);
@@ -245,7 +245,7 @@ bool MachineCombiner::preservesCriticalPathLen(
dbgs() << " RootDepth + RootLatency + RootSlack "
<< RootDepth + RootLatency + RootSlack << "\n";);
- /// True when the new sequence does not lenghten the critical path.
+ /// True when the new sequence does not lengthen the critical path.
return ((NewRootDepth + NewRootLatency) <=
(RootDepth + RootLatency + RootSlack));
}
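
A quick worked example with made-up numbers: if NewRootDepth = 3 and NewRootLatency = 2, the new sequence needs 3 + 2 = 5 cycles; with RootDepth = 2, RootLatency = 2, and RootSlack = 2, the budget is 2 + 2 + 2 = 6 cycles, so 5 <= 6 holds and the combine is treated as preserving the critical path.
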
@@ -284,7 +284,7 @@ bool MachineCombiner::preservesResourceLen(
ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);
- // Compute new resource length
+ // Compute new resource length.
unsigned ResLenAfterCombine =
BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
@@ -322,7 +322,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
auto &MI = *BlockIter++;
DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
- SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Pattern;
+ SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
// The motivating example is:
//
// MUL Other MUL_op1 MUL_op2 Other
@@ -345,11 +345,11 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
//
// The algorithm does not try to evaluate all patterns and pick the best.
// This is only an artificial restriction though. In practice there is
- // mostly one pattern and hasPattern() can order patterns based on an
- // internal cost heuristic.
+ // mostly one pattern, and getMachineCombinerPatterns() can order patterns
+ // based on an internal cost heuristic.
- if (TII->hasPattern(MI, Pattern)) {
- for (auto P : Pattern) {
+ if (TII->getMachineCombinerPatterns(MI, Patterns)) {
+ for (auto P : Patterns) {
SmallVector<MachineInstr *, 16> InsInstrs;
SmallVector<MachineInstr *, 16> DelInstrs;
DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
@@ -373,8 +373,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
InstrIdxForVirtReg) &&
preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
for (auto *InstrPtr : InsInstrs)
- MBB->insert((MachineBasicBlock::iterator) & MI,
- (MachineInstr *)InstrPtr);
+ MBB->insert((MachineBasicBlock::iterator) &MI, InstrPtr);
for (auto *InstrPtr : DelInstrs)
InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
@@ -383,15 +382,14 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
Traces->invalidate(MBB);
Traces->verifyAnalysis();
- // Eagerly stop after the first pattern fired
+ // Eagerly stop after the first pattern fires.
break;
} else {
        // Clean up the instructions of the alternative code sequence. There
        // is no use for them.
- for (auto *InstrPtr : InsInstrs) {
- MachineFunction *MF = MBB->getParent();
- MF->DeleteMachineInstr((MachineInstr *)InstrPtr);
- }
+ MachineFunction *MF = MBB->getParent();
+ for (auto *InstrPtr : InsInstrs)
+ MF->DeleteMachineInstr(InstrPtr);
}
InstrIdxForVirtReg.clear();
}
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index a6863412132b..ec171b0cae0c 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -55,7 +55,7 @@ namespace {
DenseMap<unsigned, MachineInstr*> &AvailCopyMap);
bool CopyPropagateBlock(MachineBasicBlock &MBB);
};
-}
+} // namespace
char MachineCopyPropagation::ID = 0;
char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 09662b6e48d3..67b9d77697e9 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionInitializer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
@@ -41,11 +42,13 @@ using namespace llvm;
#define DEBUG_TYPE "codegen"
+void MachineFunctionInitializer::anchor() {}
+
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
-// Out of line virtual method.
+// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() {}
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
@@ -114,8 +117,8 @@ MachineFunction::~MachineFunction() {
}
}
-/// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it
-/// does already exist, allocate one.
+/// Get the JumpTableInfo for this function.
+/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
@@ -130,11 +133,10 @@ bool MachineFunction::shouldSplitStack() {
return getFunction()->hasFnAttribute("split-stack");
}
-/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
-/// recomputes them. This guarantees that the MBB numbers are sequential,
-/// dense, and match the ordering of the blocks within the function. If a
-/// specific MachineBasicBlock is specified, only that block and those after
-/// it are renumbered.
+/// This discards all of the MachineBasicBlock numbers and recomputes them.
+/// This guarantees that the MBB numbers are sequential, dense, and match the
+/// ordering of the blocks within the function. If a specific MachineBasicBlock
+/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (empty()) { MBBNumbering.clear(); return; }
MachineFunction::iterator MBBI, E = end();
@@ -172,9 +174,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
MBBNumbering.resize(BlockNo);
}
-/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
-/// of `new MachineInstr'.
-///
+/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL, bool NoImp) {
@@ -182,17 +182,15 @@ MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
MachineInstr(*this, MCID, DL, NoImp);
}
-/// CloneMachineInstr - Create a new MachineInstr which is a copy of the
-/// 'Orig' instruction, identical in all ways except the instruction
-/// has no parent, prev, or next.
-///
+/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
+/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, *Orig);
}
-/// DeleteMachineInstr - Delete the given MachineInstr.
+/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
@@ -208,17 +206,15 @@ MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
InstructionRecycler.Deallocate(Allocator, MI);
}
-/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
-/// instead of `new MachineBasicBlock'.
-///
+/// Allocate a new MachineBasicBlock. Use this instead of
+/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
MachineBasicBlock(*this, bb);
}
-/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
-///
+/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
assert(MBB->getParent() == this && "MBB parent mismatch!");
@@ -408,7 +404,7 @@ namespace llvm {
return OutStr;
}
};
-}
+} // namespace llvm
void MachineFunction::viewCFG() const
{
@@ -430,7 +426,7 @@ void MachineFunction::viewCFGOnly() const
#endif // NDEBUG
}
-/// addLiveIn - Add the specified physical register as a live-in value and
+/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
const TargetRegisterClass *RC) {
@@ -454,7 +450,7 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
return VReg;
}
-/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
+/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
@@ -471,8 +467,7 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
return Ctx.getOrCreateSymbol(Name);
}
-/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
-/// base.
+/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
const DataLayout *DL = getTarget().getDataLayout();
return Ctx.getOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
@@ -483,8 +478,7 @@ MCSymbol *MachineFunction::getPICBaseSymbol() const {
// MachineFrameInfo implementation
//===----------------------------------------------------------------------===//
-/// ensureMaxAlignment - Make sure the function is at least Align bytes
-/// aligned.
+/// Make sure the function is at least Align bytes aligned.
void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
if (!StackRealignable || !RealignOption)
assert(Align <= StackAlignment &&
@@ -492,7 +486,7 @@ void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
if (MaxAlignment < Align) MaxAlignment = Align;
}
-/// clampStackAlignment - Clamp the alignment if requested and emit a warning.
+/// Clamp the alignment if requested and emit a warning.
static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
unsigned StackAlign) {
if (!ShouldClamp || Align <= StackAlign)
@@ -503,9 +497,8 @@ static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
return StackAlign;
}
-/// CreateStackObject - Create a new statically sized stack object, returning
-/// a nonnegative identifier to represent it.
-///
+/// Create a new statically sized stack object, returning a nonnegative
+/// identifier to represent it.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
bool isSS, const AllocaInst *Alloca) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
@@ -519,10 +512,8 @@ int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
return Index;
}
-/// CreateSpillStackObject - Create a new statically sized stack object that
-/// represents a spill slot, returning a nonnegative identifier to represent
-/// it.
-///
+/// Create a new statically sized stack object that represents a spill slot,
+/// returning a nonnegative identifier to represent it.
int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
unsigned Alignment) {
Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
@@ -533,11 +524,9 @@ int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
return Index;
}
-/// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
-/// variable sized object has been created. This must be created whenever a
-/// variable sized object is created, whether or not the index returned is
-/// actually used.
-///
+/// Notify the MachineFrameInfo object that a variable sized object has been
+/// created. This must be created whenever a variable sized object is created,
+/// whether or not the index returned is actually used.
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
@@ -548,11 +537,10 @@ int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
return (int)Objects.size()-NumFixedObjects-1;
}
-/// CreateFixedObject - Create a new object at a fixed location on the stack.
+/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
-///
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
bool Immutable, bool isAliased) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
@@ -569,8 +557,8 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
return -++NumFixedObjects;
}
-/// CreateFixedSpillStackObject - Create a spill slot at a fixed location
-/// on the stack. Returns an index with a negative value.
+/// Create a spill slot at a fixed location on the stack.
+/// Returns an index with a negative value.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
int64_t SPOffset) {
unsigned Align = MinAlign(SPOffset, StackAlignment);
@@ -700,7 +688,7 @@ void MachineFrameInfo::dump(const MachineFunction &MF) const {
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
-/// getEntrySize - Return the size of each entry in the jump table.
+/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size.
@@ -719,7 +707,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
llvm_unreachable("Unknown jump table encoding!");
}
-/// getEntryAlignment - Return the alignment of each entry in the jump table.
+/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer
@@ -739,8 +727,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
llvm_unreachable("Unknown jump table encoding!");
}
-/// createJumpTableIndex - Create a new jump table entry in the jump table info.
-///
+/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
const std::vector<MachineBasicBlock*> &DestBBs) {
assert(!DestBBs.empty() && "Cannot create an empty jump table!");
@@ -748,8 +735,8 @@ unsigned MachineJumpTableInfo::createJumpTableIndex(
return JumpTables.size()-1;
}
-/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
-/// the jump tables to branch to New instead.
+/// If Old is the target of any jump tables, update the jump tables to branch
+/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
@@ -759,8 +746,8 @@ bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
return MadeChange;
}
-/// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
-/// the jump table to branch to New instead.
+/// If Old is a target of the jump tables, update the jump table to branch to
+/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
MachineBasicBlock *Old,
MachineBasicBlock *New) {
@@ -858,8 +845,8 @@ MachineConstantPool::~MachineConstantPool() {
delete *I;
}
-/// CanShareConstantPoolEntry - Test whether the given two constants
-/// can be allocated the same constant pool entry.
+/// Test whether the two given constants can be allocated the same constant
+/// pool entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const DataLayout *TD) {
// Handle the trivial case quickly.
@@ -901,10 +888,8 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
return A == B;
}
-/// getConstantPoolIndex - Create a new entry in the constant pool or return
-/// an existing one. User must specify the log2 of the minimum required
-/// alignment for the object.
-///
+/// Create a new entry in the constant pool or return an existing one.
+/// User must specify the log2 of the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
diff --git a/lib/CodeGen/MachineFunctionAnalysis.cpp b/lib/CodeGen/MachineFunctionAnalysis.cpp
index f6f34ba9d927..338cd1e22032 100644
--- a/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -15,12 +15,14 @@
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionInitializer.h"
using namespace llvm;
char MachineFunctionAnalysis::ID = 0;
-MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm) :
- FunctionPass(ID), TM(tm), MF(nullptr) {
+MachineFunctionAnalysis::MachineFunctionAnalysis(
+ const TargetMachine &tm, MachineFunctionInitializer *MFInitializer)
+ : FunctionPass(ID), TM(tm), MF(nullptr), MFInitializer(MFInitializer) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
@@ -47,6 +49,8 @@ bool MachineFunctionAnalysis::runOnFunction(Function &F) {
assert(!MF && "MachineFunctionAnalysis already initialized!");
MF = new MachineFunction(&F, TM, NextFnNum++,
getAnalysis<MachineModuleInfo>());
+ if (MFInitializer)
+ MFInitializer->initializeMachineFunction(*MF);
return false;
}
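
The hook is easiest to see with a toy initializer. A sketch, assuming (as the call above suggests, though this hunk does not show the header) that MachineFunctionInitializer declares a virtual bool initializeMachineFunction(MachineFunction &) method:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionInitializer.h"

using namespace llvm;

// Toy initializer: bump every machine function to a minimum alignment as
// soon as MachineFunctionAnalysis creates it.
struct AlignmentInitializer : MachineFunctionInitializer {
  bool initializeMachineFunction(MachineFunction &MF) override {
    if (MF.getAlignment() < 4)
      MF.setAlignment(4); // Alignment is in log2 units here.
    return false; // No error.
  }
};

// An instance would be threaded through the new MFInitializer parameter of
// LLVMTargetMachine::addPassesToEmitFile.
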
diff --git a/lib/CodeGen/MachineFunctionPrinterPass.cpp b/lib/CodeGen/MachineFunctionPrinterPass.cpp
index 790f5accdb26..57b7230e6cd5 100644
--- a/lib/CodeGen/MachineFunctionPrinterPass.cpp
+++ b/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -49,7 +49,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
};
char MachineFunctionPrinterPass::ID = 0;
-}
+} // namespace
char &llvm::MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
INITIALIZE_PASS(MachineFunctionPrinterPass, "machineinstr-printer",
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index e67102865bfa..19ba5cfd34b0 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -1450,9 +1450,9 @@ bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
if (const Value *V = (*I)->getValue()) {
// If we have an AliasAnalysis, ask it whether the memory is constant.
- if (AA && AA->pointsToConstantMemory(
- AliasAnalysis::Location(V, (*I)->getSize(),
- (*I)->getAAInfo())))
+ if (AA &&
+ AA->pointsToConstantMemory(
+ MemoryLocation(V, (*I)->getSize(), (*I)->getAAInfo())))
continue;
}
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index cce590c6dc5b..e9ea5ed9648c 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -27,7 +27,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -74,7 +74,7 @@ namespace {
const TargetRegisterInfo *TRI;
const MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
- const InstrItineraryData *InstrItins;
+ TargetSchedModel SchedModel;
bool PreRegAlloc;
// Various analyses that we use...
@@ -338,12 +338,13 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
return false;
Changed = FirstInLoop = false;
- TII = MF.getSubtarget().getInstrInfo();
- TLI = MF.getSubtarget().getTargetLowering();
- TRI = MF.getSubtarget().getRegisterInfo();
+ const TargetSubtargetInfo &ST = MF.getSubtarget();
+ TII = ST.getInstrInfo();
+ TLI = ST.getTargetLowering();
+ TRI = ST.getRegisterInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
- InstrItins = MF.getSubtarget().getInstrItineraryData();
+ SchedModel.init(ST.getSchedModel(), &ST, TII);
PreRegAlloc = MRI->isSSA();
@@ -1046,7 +1047,7 @@ bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
/// it 'high'.
bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
unsigned DefIdx, unsigned Reg) const {
- if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
+ if (MRI->use_nodbg_empty(Reg))
return false;
for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
@@ -1062,7 +1063,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
if (MOReg != Reg)
continue;
- if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, &UseMI, i))
+ if (TII->hasHighOperandLatency(SchedModel, MRI, &MI, DefIdx, &UseMI, i))
return true;
}
@@ -1078,8 +1079,6 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
return true;
- if (!InstrItins || InstrItins->isEmpty())
- return false;
bool isCheap = false;
unsigned NumDefs = MI.getDesc().getNumDefs();
@@ -1092,7 +1091,7 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
- if (!TII->hasLowDefLatency(InstrItins, &MI, i))
+ if (!TII->hasLowDefLatency(SchedModel, &MI, i))
return false;
isCheap = true;
}
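
These MachineLICM hunks swap itinerary-based latency queries for the TargetSchedModel interface, which also works on targets that define a scheduling model but no itineraries. A sketch of the setup and a per-operand latency query under the same assumptions the patch makes; DefMI, UseMI and the operand indices are placeholders:

  #include "llvm/CodeGen/TargetSchedule.h"

  TargetSchedModel SchedModel;
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  SchedModel.init(ST.getSchedModel(), &ST, ST.getInstrInfo());
  // Per-operand latency now comes from the scheduling model rather than
  // from InstrItineraryData.
  unsigned Latency =
      SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);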
diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp
index eec984f53b90..a303426b420c 100644
--- a/lib/CodeGen/MachineModuleInfo.cpp
+++ b/lib/CodeGen/MachineModuleInfo.cpp
@@ -97,7 +97,7 @@ public:
void UpdateForDeletedBlock(BasicBlock *BB);
void UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New);
};
-}
+} // namespace llvm
MCSymbol *MMIAddrLabelMap::getAddrLabelSymbol(BasicBlock *BB) {
assert(BB->hasAddressTaken() &&
@@ -318,23 +318,6 @@ void MachineModuleInfo::EndFunction() {
VariableDbgInfos.clear();
}
-/// AnalyzeModule - Scan the module for global debug information.
-///
-void MachineModuleInfo::AnalyzeModule(const Module &M) {
- // Insert functions in the llvm.used array (but not llvm.compiler.used) into
- // UsedFunctions.
- const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
- if (!GV || !GV->hasInitializer()) return;
-
- // Should be an array of 'i8*'.
- const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
-
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (const Function *F =
- dyn_cast<Function>(InitList->getOperand(i)->stripPointerCasts()))
- UsedFunctions.insert(F);
-}
-
//===- Address of Block Management ----------------------------------------===//
diff --git a/lib/CodeGen/MachineSSAUpdater.cpp b/lib/CodeGen/MachineSSAUpdater.cpp
index 71a6ebaba243..fd1bf31aa3e5 100644
--- a/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/lib/CodeGen/MachineSSAUpdater.cpp
@@ -340,7 +340,7 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
/// for the specified BB and if so, return it. If not, construct SSA form by
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 44107d6ad16b..dd7654b1e556 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -347,7 +347,7 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (skipOptnoneFunction(*mf.getFunction()))
return false;
- if (!mf.getSubtarget().enablePostMachineScheduler()) {
+ if (!mf.getSubtarget().enablePostRAScheduler()) {
DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
return false;
}
@@ -1262,7 +1262,7 @@ public:
protected:
void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
-} // anonymous
+} // namespace
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
ScheduleDAGMI *DAG) {
@@ -1271,7 +1271,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
SUnit *SU = Loads[Idx];
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+ if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
}
if (LoadRecords.size() < 2)
@@ -1355,7 +1355,7 @@ public:
void apply(ScheduleDAGMI *DAG) override;
};
-} // anonymous
+} // namespace
/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
@@ -1407,7 +1407,7 @@ public:
protected:
void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
-} // anonymous
+} // namespace
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
@@ -2150,7 +2150,7 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
bool IsPostRA,
SchedBoundary &CurrZone,
SchedBoundary *OtherZone) {
- // Apply preemptive heuristics based on the the total latency and resources
+ // Apply preemptive heuristics based on the total latency and resources
// inside and outside this zone. Potential stalls should be considered before
// following this policy.
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index aed0e500d441..1b9be50068a9 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -73,6 +73,9 @@ namespace {
SparseBitVector<> RegsToClearKillFlags;
+ typedef std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>
+ AllSuccsCache;
+
public:
static char ID; // Pass identification
MachineSinking() : MachineFunctionPass(ID) {
@@ -120,18 +123,24 @@ namespace {
MachineBasicBlock *From,
MachineBasicBlock *To,
bool BreakPHIEdge);
- bool SinkInstruction(MachineInstr *MI, bool &SawStore);
+ bool SinkInstruction(MachineInstr *MI, bool &SawStore,
+ AllSuccsCache &AllSuccessors);
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge, bool &LocalUse) const;
MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, MachineBasicBlock *MBB,
- bool &BreakPHIEdge);
+ bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);
bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
- MachineBasicBlock *SuccToSinkTo);
+ MachineBasicBlock *SuccToSinkTo,
+ AllSuccsCache &AllSuccessors);
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
+
+ SmallVector<MachineBasicBlock *, 4> &
+ GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
+ AllSuccsCache &AllSuccessors) const;
};
} // end anonymous namespace
@@ -269,9 +278,8 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
// Process all basic blocks.
CEBCandidates.clear();
ToSplit.clear();
- for (MachineFunction::iterator I = MF.begin(), E = MF.end();
- I != E; ++I)
- MadeChange |= ProcessBlock(*I);
+ for (auto &MBB : MF)
+ MadeChange |= ProcessBlock(MBB);
// If we have anything we marked as toSplit, split it now.
for (auto &Pair : ToSplit) {
@@ -310,6 +318,9 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
bool MadeChange = false;
+ // Cache all successors, sorted by frequency info and loop depth.
+ AllSuccsCache AllSuccessors;
+
// Walk the basic block bottom-up. Remember if we saw a store.
MachineBasicBlock::iterator I = MBB.end();
--I;
@@ -332,7 +343,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
continue;
}
- if (SinkInstruction(MI, SawStore))
+ if (SinkInstruction(MI, SawStore, AllSuccessors))
++NumSunk, MadeChange = true;
// If we just processed the first instruction in the block, we're done.
@@ -484,7 +495,8 @@ static void collectDebugValues(MachineInstr *MI,
/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
- MachineBasicBlock *SuccToSinkTo) {
+ MachineBasicBlock *SuccToSinkTo,
+ AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
@@ -514,18 +526,66 @@ bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
// can be further profitably sunk into another block in the next round.
bool BreakPHIEdge = false;
// FIXME - If finding successor is compile time expensive then cache results.
- if (MachineBasicBlock *MBB2 = FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge))
- return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2);
+ if (MachineBasicBlock *MBB2 =
+ FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
+ return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);
// If SuccToSinkTo is final destination and it is a post dominator of current
// block then it is not profitable to sink MI into SuccToSinkTo block.
return false;
}
+/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
+/// computing it if it was not already cached.
+SmallVector<MachineBasicBlock *, 4> &
+MachineSinking::GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
+ AllSuccsCache &AllSuccessors) const {
+
+ // Do we have the sorted successors in the cache?
+ auto Succs = AllSuccessors.find(MBB);
+ if (Succs != AllSuccessors.end())
+ return Succs->second;
+
+ SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
+ MBB->succ_end());
+
+ // Handle cases where sinking can happen but where the sink point isn't a
+ // successor. For example:
+ //
+ // x = computation
+ // if () {} else {}
+ // use x
+ //
+ const std::vector<MachineDomTreeNode *> &Children =
+ DT->getNode(MBB)->getChildren();
+ for (const auto &DTChild : Children)
+ // DomTree children of MBB that have MBB as immediate dominator are added.
+ if (DTChild->getIDom()->getBlock() == MI->getParent() &&
+ // Skip MBBs already added to the AllSuccs vector above.
+ !MBB->isSuccessor(DTChild->getBlock()))
+ AllSuccs.push_back(DTChild->getBlock());
+
+ // Sort Successors according to their loop depth or block frequency info.
+ std::stable_sort(
+ AllSuccs.begin(), AllSuccs.end(),
+ [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
+ uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
+ uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
+ bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
+ return HasBlockFreq ? LHSFreq < RHSFreq
+ : LI->getLoopDepth(L) < LI->getLoopDepth(R);
+ });
+
+ auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
+
+ return it.first->second;
+}
+
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
MachineBasicBlock *MBB,
- bool &BreakPHIEdge) {
+ bool &BreakPHIEdge,
+ AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (MBB && "Invalid MachineBasicBlock!");
@@ -579,38 +639,8 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// we should sink to. If we have reliable block frequency information
// (frequency != 0) available, give successors with smaller frequencies
// higher priority, otherwise prioritize smaller loop depths.
- SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(),
- MBB->succ_end());
-
- // Handle cases where sinking can happen but where the sink point isn't a
- // successor. For example:
- //
- // x = computation
- // if () {} else {}
- // use x
- //
- const std::vector<MachineDomTreeNode *> &Children =
- DT->getNode(MBB)->getChildren();
- for (const auto &DTChild : Children)
- // DomTree children of MBB that have MBB as immediate dominator are added.
- if (DTChild->getIDom()->getBlock() == MI->getParent() &&
- // Skip MBBs already added to the Succs vector above.
- !MBB->isSuccessor(DTChild->getBlock()))
- Succs.push_back(DTChild->getBlock());
-
- // Sort Successors according to their loop depth or block frequency info.
- std::stable_sort(
- Succs.begin(), Succs.end(),
- [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
- uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
- uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
- bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
- return HasBlockFreq ? LHSFreq < RHSFreq
- : LI->getLoopDepth(L) < LI->getLoopDepth(R);
- });
- for (SmallVectorImpl<MachineBasicBlock *>::iterator SI = Succs.begin(),
- E = Succs.end(); SI != E; ++SI) {
- MachineBasicBlock *SuccBlock = *SI;
+ for (MachineBasicBlock *SuccBlock :
+ GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
bool LocalUse = false;
if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
BreakPHIEdge, LocalUse)) {
@@ -625,7 +655,7 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// If we couldn't find a block to sink to, ignore this instruction.
if (!SuccToSinkTo)
return nullptr;
- if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
+ if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
return nullptr;
}
}
@@ -645,7 +675,8 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
-bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
+bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore,
+ AllSuccsCache &AllSuccessors) {
// Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
// be close to the source to make it easier to coalesce.
if (AvoidsSinking(MI, MRI))
@@ -669,8 +700,8 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
bool BreakPHIEdge = false;
MachineBasicBlock *ParentBlock = MI->getParent();
- MachineBasicBlock *SuccToSinkTo = FindSuccToSinkTo(MI, ParentBlock,
- BreakPHIEdge);
+ MachineBasicBlock *SuccToSinkTo =
+ FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
// If there are no outputs, it must have side-effects.
if (!SuccToSinkTo)
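
One subtlety in the GetAllSortedSuccessors comparator introduced above is worth spelling out: block frequency only drives the ordering when both blocks report a non-zero frequency; otherwise the sort silently falls back to loop depth. A standalone restatement, illustrative only:

  // Mirrors the lambda in GetAllSortedSuccessors: prefer colder blocks when
  // frequency data exists for both sides, else prefer shallower loops.
  static bool sinkBefore(uint64_t LFreq, uint64_t RFreq,
                         unsigned LDepth, unsigned RDepth) {
    bool HasBlockFreq = LFreq != 0 && RFreq != 0;
    return HasBlockFreq ? LFreq < RFreq : LDepth < RDepth;
  }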
diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp
index 34ac9d5b0ed7..7704d1498be0 100644
--- a/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/lib/CodeGen/MachineTraceMetrics.cpp
@@ -306,7 +306,7 @@ public:
MinInstrCountEnsemble(MachineTraceMetrics *mtm)
: MachineTraceMetrics::Ensemble(mtm) {}
};
-}
+} // namespace
// Select the preferred predecessor for MBB.
const MachineBasicBlock*
@@ -414,7 +414,7 @@ struct LoopBounds {
const MachineLoopInfo *loops)
: Blocks(blocks), Loops(loops), Downward(false) {}
};
-}
+} // namespace
// Specialize po_iterator_storage in order to prune the post-order traversal so
// it is limited to the current loop and doesn't traverse the loop back edges.
@@ -447,7 +447,7 @@ public:
return LB.Visited.insert(To).second;
}
};
-}
+} // namespace llvm
/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
@@ -619,7 +619,7 @@ struct DataDep {
assert((++DefI).atEnd() && "Register has multiple defs");
}
};
-}
+} // namespace
// Get the input data dependencies that must be ready before UseMI can issue.
// Return true if UseMI has any physreg operands.
@@ -681,7 +681,7 @@ struct LiveRegUnit {
LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(nullptr), Op(0) {}
};
-}
+} // namespace
// Identify physreg dependencies for UseMI, and update the live regunit
// tracking set when scanning instructions downwards.
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index ca35ec5fdcf8..72a67690614c 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -258,7 +258,7 @@ namespace {
}
};
-}
+} // namespace
char MachineVerifierPass::ID = 0;
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
@@ -1710,7 +1710,7 @@ namespace {
bool EntryIsSetup;
bool ExitIsSetup;
};
-}
+} // namespace
/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
/// by a FrameDestroy <n>, stack adjustments are identical on all
diff --git a/lib/CodeGen/OptimizePHIs.cpp b/lib/CodeGen/OptimizePHIs.cpp
index a1042e720c37..9780d75eb0d2 100644
--- a/lib/CodeGen/OptimizePHIs.cpp
+++ b/lib/CodeGen/OptimizePHIs.cpp
@@ -55,7 +55,7 @@ namespace {
bool IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle);
bool OptimizeBB(MachineBasicBlock &MBB);
};
-}
+} // namespace
char OptimizePHIs::ID = 0;
char &llvm::OptimizePHIsID = OptimizePHIs::ID;
diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp
index d51419083d06..471c78a6d8ca 100644
--- a/lib/CodeGen/PHIElimination.cpp
+++ b/lib/CodeGen/PHIElimination.cpp
@@ -88,8 +88,8 @@ namespace {
// These functions are temporary abstractions around LiveVariables and
// LiveIntervals, so they can go away when LiveVariables does.
- bool isLiveIn(unsigned Reg, MachineBasicBlock *MBB);
- bool isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB);
+ bool isLiveIn(unsigned Reg, const MachineBasicBlock *MBB);
+ bool isLiveOutPastPHIs(unsigned Reg, const MachineBasicBlock *MBB);
typedef std::pair<unsigned, unsigned> BBVRegPair;
typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse;
@@ -104,7 +104,7 @@ namespace {
MachineInstrExpressionTrait> LoweredPHIMap;
LoweredPHIMap LoweredPHIs;
};
-}
+} // namespace
STATISTIC(NumLowered, "Number of phis lowered");
STATISTIC(NumCriticalEdgesSplit, "Number of critical edges split");
@@ -143,16 +143,16 @@ bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
// updating LiveIntervals, so we disable it.
if (!DisableEdgeSplitting && (LV || LIS)) {
MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- Changed |= SplitPHIEdges(MF, *I, MLI);
+ for (auto &MBB : MF)
+ Changed |= SplitPHIEdges(MF, MBB, MLI);
}
// Populate VRegPHIUseCount
analyzePHINodes(MF);
// Eliminate PHI instructions by inserting copies into predecessor blocks.
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- Changed |= EliminatePHINodes(MF, *I);
+ for (auto &MBB : MF)
+ Changed |= EliminatePHINodes(MF, MBB);
// Remove dead IMPLICIT_DEF instructions.
for (MachineInstr *DefMI : ImpDefs) {
@@ -623,7 +623,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
return Changed;
}
-bool PHIElimination::isLiveIn(unsigned Reg, MachineBasicBlock *MBB) {
+bool PHIElimination::isLiveIn(unsigned Reg, const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveIn() requires either LiveVariables or LiveIntervals");
if (LIS)
@@ -632,7 +632,8 @@ bool PHIElimination::isLiveIn(unsigned Reg, MachineBasicBlock *MBB) {
return LV->isLiveIn(Reg, *MBB);
}
-bool PHIElimination::isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB) {
+bool PHIElimination::isLiveOutPastPHIs(unsigned Reg,
+ const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveOutPastPHIs() requires either LiveVariables or LiveIntervals");
// LiveVariables considers uses in PHIs to be in the predecessor basic block,
@@ -642,11 +643,9 @@ bool PHIElimination::isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB) {
// out of the block.
if (LIS) {
const LiveInterval &LI = LIS->getInterval(Reg);
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- if (LI.liveAt(LIS->getMBBStartIdx(*SI)))
+ for (const MachineBasicBlock *SI : MBB->successors())
+ if (LI.liveAt(LIS->getMBBStartIdx(SI)))
return true;
- }
return false;
} else {
return LV->isLiveOut(Reg, *MBB);
diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp
index 4cd86e66c0e8..210a7a1649cd 100644
--- a/lib/CodeGen/Passes.cpp
+++ b/lib/CodeGen/Passes.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
@@ -72,6 +73,10 @@ static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
+static cl::opt<bool> EnableImplicitNullChecks(
+ "enable-implicit-null-checks",
+ cl::desc("Fold null checks into faulting memory operations"),
+ cl::init(false));
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -452,6 +457,9 @@ void TargetPassConfig::addCodeGenPrepare() {
void TargetPassConfig::addISelPrepare() {
addPreISel();
+ // Add both the safe stack and the stack protection passes: each of them will
+ // only protect functions that have corresponding attributes.
+ addPass(createSafeStackPass());
addPass(createStackProtectorPass(TM));
if (PrintISelInput)
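
The new comment above notes that each hardening pass only touches functions carrying the matching attribute. A sketch of that gating, assuming the LLVM 3.7 attribute enums; the helper names are hypothetical, and the real checks live inside the passes themselves:

  static bool wantsSafeStack(const Function &F) {
    return F.hasFnAttribute(Attribute::SafeStack);
  }
  static bool wantsStackProtector(const Function &F) {
    return F.hasFnAttribute(Attribute::StackProtect) ||
           F.hasFnAttribute(Attribute::StackProtectStrong) ||
           F.hasFnAttribute(Attribute::StackProtectReq);
  }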
@@ -543,6 +551,9 @@ void TargetPassConfig::addMachinePasses() {
// Run pre-sched2 passes.
addPreSched2();
+ if (EnableImplicitNullChecks)
+ addPass(&ImplicitNullChecksID);
+
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
if (MISchedPostRA)
diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp
index ebe05e3f2731..71c0a64325ba 100644
--- a/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/lib/CodeGen/PeepholeOptimizer.cpp
@@ -293,7 +293,7 @@ namespace {
/// register of the last source.
unsigned getReg() const { return Reg; }
};
-}
+} // namespace
char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index 55f08e496de1..6760b5f95097 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -184,7 +184,7 @@ namespace {
void dumpSchedule() const;
void emitNoop(unsigned CurCycle);
};
-}
+} // namespace
char &llvm::PostRASchedulerID = PostRAScheduler::ID;
@@ -257,7 +257,7 @@ bool PostRAScheduler::enablePostRAScheduler(
TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
Mode = ST.getAntiDepBreakMode();
ST.getCriticalPathRCs(CriticalPathRCs);
- return ST.enablePostMachineScheduler() &&
+ return ST.enablePostRAScheduler() &&
OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index fd3d4d78968b..4a466381b9db 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -194,7 +194,7 @@ namespace {
bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
};
char RAFast::ID = 0;
-}
+} // namespace
/// getStackSpaceFor - This allocates space for the specified virtual register
/// to be held on the stack.
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index e513a4f1ccf5..e2061fe1dbae 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -2633,7 +2633,8 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
// "overflow bit" 32. As a workaround we drop all subregister ranges
// which means we lose some precision but are back to a well-defined
// state.
- assert((CP.getNewRC()->getLaneMask() & 0x80000000u)
+ assert(TargetRegisterInfo::isImpreciseLaneMask(
+ CP.getNewRC()->getLaneMask())
&& "SubRange merge should only fail when merging into bit 32.");
DEBUG(dbgs() << "\tSubrange join aborted!\n");
LHS.clearSubRanges();
@@ -2696,7 +2697,7 @@ struct MBBPriorityInfo {
MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
: MBB(mbb), Depth(depth), IsSplit(issplit) {}
};
-}
+} // namespace
/// C-style comparator that sorts first based on the loop depth of the basic
/// block (the unsigned), and then on the MBB number.
diff --git a/lib/CodeGen/RegisterCoalescer.h b/lib/CodeGen/RegisterCoalescer.h
index 04067a1427af..4ba74417a16c 100644
--- a/lib/CodeGen/RegisterCoalescer.h
+++ b/lib/CodeGen/RegisterCoalescer.h
@@ -111,6 +111,6 @@ namespace llvm {
/// Return the register class of the coalesced register.
const TargetRegisterClass *getNewRC() const { return NewRC; }
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index a34bd6341d22..4176686d1f7f 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -103,10 +103,6 @@ void RegScavenger::determineKillsAndDefs() {
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
- // FIXME: The scavenger is not predication aware. If the instruction is
- // predicated, conservatively assume "kill" markers do not actually kill the
- // register. Similarly ignores "dead" markers.
- bool isPred = TII->isPredicated(MI);
KillRegUnits.reset();
DefRegUnits.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -124,7 +120,7 @@ void RegScavenger::determineKillsAndDefs() {
}
// Apply the mask.
- (isPred ? DefRegUnits : KillRegUnits) |= TmpRegUnits;
+ KillRegUnits |= TmpRegUnits;
}
if (!MO.isReg())
continue;
@@ -136,11 +132,11 @@ void RegScavenger::determineKillsAndDefs() {
// Ignore undef uses.
if (MO.isUndef())
continue;
- if (!isPred && MO.isKill())
+ if (MO.isKill())
addRegUnits(KillRegUnits, Reg);
} else {
assert(MO.isDef());
- if (!isPred && MO.isDead())
+ if (MO.isDead())
addRegUnits(KillRegUnits, Reg);
else
addRegUnits(DefRegUnits, Reg);
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index e8e47b764dd2..ae4b935d719a 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -574,11 +574,11 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
- AliasAnalysis::AliasResult AAResult = AA->alias(
- AliasAnalysis::Location(MMOa->getValue(), Overlapa,
- UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
- AliasAnalysis::Location(MMOb->getValue(), Overlapb,
- UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+ AliasAnalysis::AliasResult AAResult =
+ AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
+ UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(MMOb->getValue(), Overlapb,
+ UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
return (AAResult != AliasAnalysis::NoAlias);
}
@@ -1508,7 +1508,7 @@ public:
return getCurr()->Preds.end();
}
};
-} // anonymous
+} // namespace
static bool hasDataSucc(const SUnit *SU) {
for (SUnit::const_succ_iterator
diff --git a/lib/CodeGen/ScheduleDAGPrinter.cpp b/lib/CodeGen/ScheduleDAGPrinter.cpp
index b2e4617720ff..cdf27ae5fedd 100644
--- a/lib/CodeGen/ScheduleDAGPrinter.cpp
+++ b/lib/CodeGen/ScheduleDAGPrinter.cpp
@@ -72,7 +72,7 @@ namespace llvm {
return G->addCustomGraphFeatures(GW);
}
};
-}
+} // namespace llvm
std::string DOTGraphTraits<ScheduleDAG*>::getNodeLabel(const SUnit *SU,
const ScheduleDAG *G) {
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a71c6761c75f..5fea52c97496 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -255,6 +255,7 @@ namespace {
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitRotate(SDNode *N);
+ SDValue visitBSWAP(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
@@ -387,6 +388,13 @@ namespace {
unsigned SequenceNum;
};
+ /// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns a
+ /// constant build_vector of the stored constant values in Stores.
+ SDValue getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const;
+
/// This is a helper function for MergeConsecutiveStores. When the source
/// elements of the consecutive stores are all constants or all extracted
/// vector elements, try to merge them into one larger store.
@@ -395,6 +403,13 @@ namespace {
EVT MemVT, unsigned NumElem,
bool IsConstantSrc, bool UseVector);
+ /// This is a helper function for MergeConsecutiveStores.
+ /// Stores that may be merged are placed in StoreNodes.
+ /// Loads that may alias with those stores are placed in AliasLoadNodes.
+ void getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
+
/// Merge consecutive store operations into a wide store.
/// This optimization uses wide integers or vectors when possible.
/// \return True if some memory operations were changed.
@@ -444,7 +459,7 @@ namespace {
return TLI.getSetCCResultType(*DAG.getContext(), VT);
}
};
-}
+} // namespace
namespace {
@@ -460,7 +475,7 @@ public:
DC.removeFromWorklist(N);
}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// TargetLowering::DAGCombinerInfo implementation
@@ -1335,6 +1350,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::SRL: return visitSRL(N);
case ISD::ROTR:
case ISD::ROTL: return visitRotate(N);
+ case ISD::BSWAP: return visitBSWAP(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
@@ -1454,12 +1470,9 @@ SDValue DAGCombiner::combine(SDNode *N) {
if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
SDValue Ops[] = {N1, N0};
SDNode *CSENode;
- if (const BinaryWithFlagsSDNode *BinNode =
- dyn_cast<BinaryWithFlagsSDNode>(N)) {
+ if (const auto *BinNode = dyn_cast<BinaryWithFlagsSDNode>(N)) {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
- BinNode->Flags.hasNoUnsignedWrap(),
- BinNode->Flags.hasNoSignedWrap(),
- BinNode->Flags.hasExact());
+ &BinNode->Flags);
} else {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
}
@@ -4764,6 +4777,19 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitBSWAP(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // fold (bswap c1) -> c2
+ if (isConstantIntBuildVectorOrConstantInt(N0))
+ return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
+ // fold (bswap (bswap x)) -> x
+ if (N0.getOpcode() == ISD::BSWAP)
+ return N0->getOperand(0);
+ return SDValue();
+}
+
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
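
The two folds in visitBSWAP above are easy to sanity-check with concrete bytes; a worked example using the compiler builtin, not part of the patch:

  uint32_t X = 0x11223344;
  uint32_t Once  = __builtin_bswap32(X);     // 0x44332211: fold (bswap c1) -> c2
  uint32_t Twice = __builtin_bswap32(Once);  // 0x11223344: (bswap (bswap x)) -> x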
@@ -5141,7 +5167,7 @@ SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MSC->getPointerInfo(),
+ getMachineMemOperand(MSC->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MSC->getAAInfo(), MSC->getRanges());
@@ -5280,7 +5306,7 @@ SDValue DAGCombiner::visitMGATHER(SDNode *N) {
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MGT->getPointerInfo(),
+ getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
@@ -8078,7 +8104,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
-
+
// Check 1: Make sure that the first operand of the inner multiply is NOT
// a constant. Otherwise, we may induce infinite looping.
if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
@@ -9928,7 +9954,7 @@ struct LoadedSlice {
return true;
}
};
-}
+} // namespace
/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
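
The dense-region shape 0..0 1..1 0..0 admits a branch-free test: strip the trailing zeros, and a dense mask plus one must be a power of two. A sketch with APInt; the helper is hypothetical and the actual LoadedSlice check may differ:

  static bool isDenseMask(const APInt &UsedBits) {
    if (!UsedBits.getBoolValue())
      return false;                      // all-zero is not a region
    APInt B = UsedBits.lshr(UsedBits.countTrailingZeros());
    return !((B + 1) & B);               // 0..01..1 plus 1 is a power of two
  }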
@@ -10576,6 +10602,18 @@ struct BaseIndexOffset {
};
} // namespace
+SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const {
+ SmallVector<SDValue, 8> BuildVector;
+
+ for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I)
+ BuildVector.push_back(cast<StoreSDNode>(Stores[I].MemNode)->getValue());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Ty, BuildVector);
+}
+
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
unsigned NumElem, bool IsConstantSrc, bool UseVector) {
@@ -10606,12 +10644,7 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
if (IsConstantSrc) {
- // A vector store with a constant source implies that the constant is
- // zero; we only handle merging stores of constant zeros because the zero
- // can be materialized without a load.
- // It may be beneficial to loosen this restriction to allow non-zero
- // store merging.
- StoredVal = DAG.getConstant(0, DL, Ty);
+ StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
} else {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElem ; ++i) {
@@ -10631,8 +10664,8 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
// elements, so this path implies a store of constants.
assert(IsConstantSrc && "Merged vector elements should use vector store");
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- APInt StoreInt(StoreBW, 0);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ APInt StoreInt(SizeInBits, 0);
// Construct a single integer constant which is made of the smaller
// constant inputs.
@@ -10641,18 +10674,18 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
SDValue Val = St->getValue();
- StoreInt <<= ElementSizeBytes*8;
+ StoreInt <<= ElementSizeBytes * 8;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
- StoreInt |= C->getAPIntValue().zext(StoreBW);
+ StoreInt |= C->getAPIntValue().zext(SizeInBits);
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
- StoreInt |= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
+ StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
} else {
llvm_unreachable("Invalid constant element type");
}
}
// Create the new Load and Store operations.
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
}
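
The endianness handling in the StoreInt loop is the subtle part: on little-endian targets the loop visits elements from highest address to lowest, so element 0 lands in the low bits. A worked example with two i16 constant stores; arithmetic only, not from the patch:

  //   st i16 0x1111 -> [p+0]   st i16 0x2222 -> [p+2]
  uint32_t StoreInt = 0;
  StoreInt = (StoreInt << 16) | 0x2222;  // LE: Idx = 1 is processed first
  StoreInt = (StoreInt << 16) | 0x1111;  // LE: Idx = 0 ends in the low bits
  // StoreInt == 0x22221111; one i32 store writes bytes 11 11 22 22, exactly
  // the layout the two narrow stores produced.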
@@ -10698,62 +10731,25 @@ static bool allowableAlignment(const SelectionDAG &DAG,
return (Align >= ABIAlignment);
}
-bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
- if (OptLevel == CodeGenOpt::None)
- return false;
-
- EVT MemVT = St->getMemoryVT();
- int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
- bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
- Attribute::NoImplicitFloat);
-
- // This function cannot currently deal with non-byte-sized memory sizes.
- if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
- return false;
-
- // Don't merge vectors into wider inputs.
- if (MemVT.isVector() || !MemVT.isSimple())
- return false;
-
- // Perform an early exit check. Do not bother looking at stored values that
- // are not constants, loads, or extracted vector elements.
- SDValue StoredVal = St->getValue();
- bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
- bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
- isa<ConstantFPSDNode>(StoredVal);
- bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
-
- if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
- return false;
-
- // Only look at ends of store sequences.
- SDValue Chain = SDValue(St, 0);
- if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
- return false;
-
+void DAGCombiner::getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr());
// We must have a base and an offset.
if (!BasePtr.Base.getNode())
- return false;
+ return;
// Do not handle stores to undef base pointers.
if (BasePtr.Base.getOpcode() == ISD::UNDEF)
- return false;
-
- // Save the LoadSDNodes that we find in the chain.
- // We need to make sure that these nodes do not interfere with
- // any of the store nodes.
- SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
-
- // Save the StoreSDNodes that we find in the chain.
- SmallVector<MemOpLink, 8> StoreNodes;
+ return;
// Walk up the chain and look for nodes with offsets from the same
// base pointer. Stop when reaching an instruction with a different kind
// or instruction which has a different base pointer.
+ EVT MemVT = St->getMemoryVT();
unsigned Seq = 0;
StoreSDNode *Index = St;
while (Index) {
@@ -10810,7 +10806,51 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
}
}
+}
+
+bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
+ if (OptLevel == CodeGenOpt::None)
+ return false;
+
+ EVT MemVT = St->getMemoryVT();
+ int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
+ bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::NoImplicitFloat);
+
+ // This function cannot currently deal with non-byte-sized memory sizes.
+ if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
+ return false;
+
+ // Don't merge vectors into wider inputs.
+ if (MemVT.isVector() || !MemVT.isSimple())
+ return false;
+
+ // Perform an early exit check. Do not bother looking at stored values that
+ // are not constants, loads, or extracted vector elements.
+ SDValue StoredVal = St->getValue();
+ bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
+ bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
+ isa<ConstantFPSDNode>(StoredVal);
+ bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+
+ if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
+ return false;
+
+ // Only look at ends of store sequences.
+ SDValue Chain = SDValue(St, 0);
+ if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
+ return false;
+
+ // Save the LoadSDNodes that we find in the chain.
+ // We need to make sure that these nodes do not interfere with
+ // any of the store nodes.
+ SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
+
+ // Save the StoreSDNodes that we find in the chain.
+ SmallVector<MemOpLink, 8> StoreNodes;
+ getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
+
// Check if there is anything to merge.
if (StoreNodes.size() < 2)
return false;
@@ -10876,8 +10916,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
// Find a legal type for the constant store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS,
FirstStoreAlign)) {
@@ -11039,8 +11079,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
// Find a legal type for the integer store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign))
@@ -11094,8 +11134,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
if (UseVectorTy) {
JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
} else {
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
@@ -12093,7 +12133,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
}
// If any of the operands is a floating point scalar bitcast to a vector,
- // use floating point types throughout, and bitcast everything.
+ // use floating point types throughout, and bitcast everything.
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
@@ -12924,7 +12964,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue RHS = N->getOperand(1);
SDLoc dl(N);
- // Make sure we're not running after operation legalization where it
+ // Make sure we're not running after operation legalization where it
// may have custom lowered the vector shuffles.
if (LegalOperations)
return SDValue();
@@ -13845,12 +13885,10 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
Op1->getSrcValueOffset() - MinOffset;
AliasAnalysis::AliasResult AAResult =
- AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
- Overlap1,
- UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
- AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
- Overlap2,
- UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
+ AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
+ UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
+ MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
+ UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
if (AAResult == AliasAnalysis::NoAlias)
return false;
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index f3d75cb32a7d..ecaa2c972719 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -259,20 +259,27 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// If this is an MSVC EH personality, we need to do a bit more work.
EHPersonality Personality = EHPersonality::Unknown;
- if (!LPads.empty())
- Personality = classifyEHPersonality(LPads.back()->getPersonalityFn());
+ if (Fn->hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn->getPersonalityFn());
if (!isMSVCEHPersonality(Personality))
return;
- if (Personality == EHPersonality::MSVC_Win64SEH) {
+ if (Personality == EHPersonality::MSVC_Win64SEH ||
+ Personality == EHPersonality::MSVC_X86SEH) {
addSEHHandlersForLPads(LPads);
- } else if (Personality == EHPersonality::MSVC_CXX) {
+ }
+
+ WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(&fn);
+ if (Personality == EHPersonality::MSVC_CXX) {
const Function *WinEHParentFn = MMI.getWinEHParent(&fn);
- WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(WinEHParentFn);
calculateWinCXXEHStateNumbers(WinEHParentFn, EHInfo);
+ }
- // Copy the state numbers to LandingPadInfo for the current function, which
- // could be a handler or the parent.
+ // Copy the state numbers to LandingPadInfo for the current function, which
+ // could be a handler or the parent. This should happen for 32-bit SEH and
+ // C++ EH.
+ if (Personality == EHPersonality::MSVC_CXX ||
+ Personality == EHPersonality::MSVC_X86SEH) {
for (const LandingPadInst *LP : LPads) {
MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
MMI.addWinEHState(LPadMBB, EHInfo.LandingPadStateMap[LP]);
@@ -539,8 +546,10 @@ void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
/// landingpad instruction and add them to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
MachineBasicBlock *MBB) {
- MMI.addPersonality(MBB,
- cast<Function>(I.getPersonalityFn()->stripPointerCasts()));
+ MMI.addPersonality(
+ MBB,
+ cast<Function>(
+ I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()));
if (I.isCleanup())
MMI.addCleanup(MBB);
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.h b/lib/CodeGen/SelectionDAG/InstrEmitter.h
index 7b86f7dd8de0..2a61914eecd3 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -140,6 +140,6 @@ private:
DenseMap<SDValue, unsigned> &VRBaseMap);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 7d98872f8af1..37f95e5a22b9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -198,7 +198,7 @@ public:
ReplacedNode(Old);
}
};
-}
+} // namespace
/// Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 9c297698c1db..c3e3b7c525b9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -676,7 +676,7 @@ namespace {
NodesToAnalyze.insert(N);
}
};
-}
+} // namespace
/// ReplaceValueWith - The specified value was legalized to the specified other
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index c06227bd9701..50ad2391d15b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1010,7 +1010,7 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
-}
+} // namespace
bool SelectionDAG::LegalizeVectors() {
return VectorLegalizer(*this).Run();
diff --git a/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h b/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
index c27f8de601f2..949353256938 100644
--- a/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
+++ b/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
@@ -119,6 +119,6 @@ public:
bool isInvalidated() const { return Invalid; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 6351fa2c4a2f..4c74182014a0 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -180,6 +180,6 @@ namespace llvm {
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
MachineBasicBlock::iterator InsertPos);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cf51e756d847..0eff930ceddd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -400,19 +400,24 @@ static void AddNodeIDOperands(FoldingSetNodeID &ID,
ID.AddInteger(Op.getResNo());
}
}
+/// Add logical or fast math flag values to FoldingSetNodeID value.
+static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
+ const SDNodeFlags *Flags) {
+ if (!Flags || !isBinOpWithFlags(Opcode))
+ return;
-static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
- bool exact) {
- ID.AddBoolean(nuw);
- ID.AddBoolean(nsw);
- ID.AddBoolean(exact);
+ unsigned RawFlags = Flags->getRawFlags();
+ // If no flags are set, do not alter the ID. We must match the ID of nodes
+ // that were created without explicitly specifying flags. This also saves time
+ // and allows a gradual increase in API usage of the optional optimization
+ // flags.
+ if (RawFlags != 0)
+ ID.AddInteger(RawFlags);
}
-/// AddBinaryNodeIDCustom - Add BinarySDNodes special infos
-static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
- bool nuw, bool nsw, bool exact) {
- if (isBinOpWithFlags(Opcode))
- AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
+ if (auto *Node = dyn_cast<BinaryWithFlagsSDNode>(N))
+ AddNodeIDFlags(ID, Node->getOpcode(), &Node->Flags);
}
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
@@ -507,20 +512,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
break;
}
- case ISD::SDIV:
- case ISD::UDIV:
- case ISD::SRA:
- case ISD::SRL:
- case ISD::MUL:
- case ISD::ADD:
- case ISD::SUB:
- case ISD::SHL: {
- const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
- AddBinaryNodeIDCustom(
- ID, N->getOpcode(), BinNode->Flags.hasNoUnsignedWrap(),
- BinNode->Flags.hasNoSignedWrap(), BinNode->Flags.hasExact());
- break;
- }
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
case ISD::ATOMIC_SWAP:
@@ -564,6 +555,8 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
}
} // end switch (N->getOpcode())
+ AddNodeIDFlags(ID, N);
+
// Target specific memory nodes could also have address spaces to check.
if (N->isTargetMemoryOpcode())
ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
@@ -960,14 +953,16 @@ void SelectionDAG::allnodes_clear() {
BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
SDVTList VTs, SDValue N1,
- SDValue N2, bool nuw, bool nsw,
- bool exact) {
+ SDValue N2,
+ const SDNodeFlags *Flags) {
if (isBinOpWithFlags(Opcode)) {
+ // If no flags were passed in, use a default flags object.
+ SDNodeFlags F;
+ if (Flags == nullptr)
+ Flags = &F;
+
BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
- Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
- FN->Flags.setNoUnsignedWrap(nuw);
- FN->Flags.setNoSignedWrap(nsw);
- FN->Flags.setExact(exact);
+ Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);
return FN;
}
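
After this refactor every getNode caller passes one optional SDNodeFlags pointer instead of three bools; the SelectionDAGBuilder hunks further below show the real call sites. A minimal caller-side sketch:

  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);   // nuw taken from the IR instruction
  Flags.setExact(false);
  // Passing nullptr is also accepted; GetBinarySDNode substitutes a
  // default-constructed flags object in that case.
  SDValue Sum = DAG.getNode(ISD::ADD, DL, VT, LHS, RHS, &Flags);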
@@ -2932,6 +2927,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
+ case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ:
@@ -3081,6 +3077,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
+ case ISD::BSWAP:
+ assert(VT.isInteger() && VT == Operand.getValueType() &&
+ "Invalid BSWAP!");
+ assert((VT.getScalarSizeInBits() % 16 == 0) &&
+ "BSWAP types must be a multiple of 16 bits!");
+ if (OpOpcode == ISD::UNDEF)
+ return getUNDEF(VT);
+ break;
case ISD::BITCAST:
// Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
@@ -3260,7 +3264,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
- SDValue N2, bool nuw, bool nsw, bool exact) {
+ SDValue N2, const SDNodeFlags *Flags) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
switch (Opcode) {
@@ -3747,22 +3751,20 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
// Memoize this node if possible.
BinarySDNode *N;
SDVTList VTs = getVTList(VT);
- const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
if (VT != MVT::Glue) {
SDValue Ops[] = {N1, N2};
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops);
- if (BinOpHasFlags)
- AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
+ AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
- N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
CSEMap.InsertNode(N, IP);
} else {
- N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
}
InsertNode(N);
@@ -4023,10 +4025,10 @@ static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}
-/// FindOptimalMemOpLowering - Determines the optimial series memory ops
-/// to replace the memset / memcpy. Return true if the number of memory ops
-/// is below the threshold. It returns the types of the sequence of
-/// memory ops to perform memset / memcpy by reference.
+/// Determines the optimal series of memory ops to replace the memset / memcpy.
+/// Return true if the number of memory ops is below the threshold (Limit).
+/// It returns the types of the sequence of memory ops to perform
+/// memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
unsigned Limit, uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
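
To make the rewritten comment concrete: for a 15-byte memcpy on a 64-bit target that tolerates unaligned accesses, one plausible answer, shown only as an illustration since the real choice is target-dependent, is:

  //   MemOps = { i64, i32, i16, i8 }   // 8 + 4 + 2 + 1 = 15 bytes, 4 ops
  // FindOptimalMemOpLowering then succeeds iff 4 <= Limit, and the caller
  // emits one load/store pair per entry.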
@@ -6066,13 +6068,12 @@ SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops, bool nuw, bool nsw,
- bool exact) {
+ ArrayRef<SDValue> Ops,
+ const SDNodeFlags *Flags) {
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops);
- if (isBinOpWithFlags(Opcode))
- AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+ AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
return E;
@@ -6133,7 +6134,7 @@ public:
: SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};
-}
+} // namespace
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
@@ -6343,7 +6344,7 @@ namespace {
bool operator<(const UseMemo &L, const UseMemo &R) {
return (intptr_t)L.User < (intptr_t)R.User;
}
-}
+} // namespace
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
@@ -6588,7 +6589,7 @@ namespace {
VTs.push_back(MVT((MVT::SimpleValueType)i));
}
};
-}
+} // namespace
static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8ba957d62870..8313a48c3467 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -78,12 +78,16 @@ LimitFPPrecision("limit-float-precision",
cl::location(LimitFloatPrecision),
cl::init(0));
+static cl::opt<bool>
+EnableFMFInDAG("enable-fmf-dag", cl::init(false), cl::Hidden,
+ cl::desc("Enable fast-math-flags for DAG nodes"));
+
// Limit the width of DAG chains. This is important in general to prevent
-// prevent DAG-based analysis from blowing up. For example, alias analysis and
+// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
-// the safe approach, and will be especially important with global DAGs.
+// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
@@ -2148,6 +2152,8 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
bool nuw = false;
bool nsw = false;
bool exact = false;
+ FastMathFlags FMF;
+
if (const OverflowingBinaryOperator *OFBinOp =
dyn_cast<const OverflowingBinaryOperator>(&I)) {
nuw = OFBinOp->hasNoUnsignedWrap();
@@ -2156,9 +2162,22 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
if (const PossiblyExactOperator *ExactOp =
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
-
+ if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
+ FMF = FPOp->getFastMathFlags();
+
+ SDNodeFlags Flags;
+ Flags.setExact(exact);
+ Flags.setNoSignedWrap(nsw);
+ Flags.setNoUnsignedWrap(nuw);
+ if (EnableFMFInDAG) {
+ Flags.setAllowReciprocal(FMF.allowReciprocal());
+ Flags.setNoInfs(FMF.noInfs());
+ Flags.setNoNaNs(FMF.noNaNs());
+ Flags.setNoSignedZeros(FMF.noSignedZeros());
+ Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
+ }
SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
- Op1, Op2, nuw, nsw, exact);
+ Op1, Op2, &Flags);
setValue(&I, BinNodeValue);
}
@@ -2206,9 +2225,12 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
}
-
+ SDNodeFlags Flags;
+ Flags.setExact(exact);
+ Flags.setNoSignedWrap(nsw);
+ Flags.setNoUnsignedWrap(nuw);
SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
- nuw, nsw, exact);
+ &Flags);
setValue(&I, Res);
}
@@ -2892,7 +2914,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (AA->pointsToConstantMemory(
- AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
+ MemoryLocation(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
@@ -2907,8 +2929,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
SmallVector<SDValue, 4> Values(NumValues);
- SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
- NumValues));
+ SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
unsigned ChainI = 0;
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
@@ -2972,8 +2993,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SDValue Ptr = getValue(PtrV);
SDValue Root = getRoot();
- SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
- NumValues));
+ SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
@@ -3141,10 +3161,8 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue InChain = DAG.getRoot();
- if (AA->pointsToConstantMemory(
- AliasAnalysis::Location(PtrOperand,
- AA->getTypeStoreSize(I.getType()),
- AAInfo))) {
+ if (AA->pointsToConstantMemory(MemoryLocation(
+ PtrOperand, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
InChain = DAG.getEntryNode();
}
@@ -3186,10 +3204,9 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
Value *BasePtr = Ptr;
bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
bool ConstantMemory = false;
- if (UniformBase && AA->pointsToConstantMemory(
- AliasAnalysis::Location(BasePtr,
- AA->getTypeStoreSize(I.getType()),
- AAInfo))) {
+ if (UniformBase &&
+ AA->pointsToConstantMemory(
+ MemoryLocation(BasePtr, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
@@ -4983,6 +5000,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert(Reg && "cannot get exception code on this platform");
MVT PtrVT = TLI.getPointerTy();
const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
+ assert(FuncInfo.MBB->isLandingPad() && "eh.exceptioncode in non-lpad");
unsigned VReg = FuncInfo.MBB->addLiveIn(Reg, PtrRC);
SDValue N =
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
@@ -7486,6 +7504,31 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
const int64_t N = Clusters.size();
const unsigned MinJumpTableSize = TLI.getMinimumJumpTableEntries();
+ // TotalCases[i]: Total nbr of cases in Clusters[0..i].
+ SmallVector<unsigned, 8> TotalCases(N);
+
+ for (unsigned i = 0; i < N; ++i) {
+ APInt Hi = Clusters[i].High->getValue();
+ APInt Lo = Clusters[i].Low->getValue();
+ TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
+ if (i != 0)
+ TotalCases[i] += TotalCases[i - 1];
+ }
+
+ if (N >= MinJumpTableSize && isDense(Clusters, &TotalCases[0], 0, N - 1)) {
+ // Cheap case: the whole range might be suitable for a jump table.
+ CaseCluster JTCluster;
+ if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
+ Clusters[0] = JTCluster;
+ Clusters.resize(1);
+ return;
+ }
+ }
+
+ // The algorithm below is not suitable for -O0.
+ if (TM.getOptLevel() == CodeGenOpt::None)
+ return;
+
// Split Clusters into minimum number of dense partitions. The algorithm uses
// the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
// for the Case Statement'" (1994), but builds the MinPartitions array in
@@ -7499,16 +7542,6 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
SmallVector<unsigned, 8> LastElement(N);
// NumTables[i]: nbr of >= MinJumpTableSize partitions from Clusters[i..N-1].
SmallVector<unsigned, 8> NumTables(N);
- // TotalCases[i]: Total nbr of cases in Clusters[0..i].
- SmallVector<unsigned, 8> TotalCases(N);
-
- for (unsigned i = 0; i < N; ++i) {
- APInt Hi = Clusters[i].High->getValue();
- APInt Lo = Clusters[i].Low->getValue();
- TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
- if (i != 0)
- TotalCases[i] += TotalCases[i - 1];
- }
// Base case: There is only one way to partition Clusters[N-1].
MinPartitions[N - 1] = 1;
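The TotalCases array moved above is a prefix sum, so the number of cases covered by any sub-range Clusters[i..j] is TotalCases[j] - TotalCases[i-1]; dividing that by the value span gives the density tested by isDense. A standalone worked example of the whole-range check (the 40% cut-off is an assumption standing in for the real threshold inside isDense):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Cluster { int64_t Low, High; }; // inclusive case range

    int main() {
      std::vector<Cluster> Clusters = {{0, 3}, {10, 10}, {12, 15}};

      // TotalCases[i]: total number of cases in Clusters[0..i] (prefix sum).
      std::vector<uint64_t> TotalCases(Clusters.size());
      for (size_t i = 0; i < Clusters.size(); ++i) {
        TotalCases[i] = uint64_t(Clusters[i].High - Clusters[i].Low) + 1;
        if (i != 0)
          TotalCases[i] += TotalCases[i - 1];
      }

      // Whole-range density: cases covered divided by values spanned.
      uint64_t NumCases = TotalCases.back();
      uint64_t Range = uint64_t(Clusters.back().High - Clusters.front().Low) + 1;
      bool Dense = NumCases * 100 >= Range * 40; // assumed 40% threshold
      std::printf("cases=%llu span=%llu dense=%d\n",
                  (unsigned long long)NumCases, (unsigned long long)Range,
                  Dense);
      // 9 cases over a span of 16 values (56%) -> build one jump table.
    }

Moving this cheap check ahead of the new -O0 bail-out means unoptimized builds still get a single jump table for dense switches while skipping the partitioning pass below.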
@@ -7696,6 +7729,10 @@ void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif
+ // The algorithm below is not suitable for -O0.
+ if (TM.getOptLevel() == CodeGenOpt::None)
+ return;
+
// If target does not have legal shift left, do not emit bit tests at all.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PTy = TLI.getPointerTy();
@@ -7959,6 +7996,18 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
}
}
+unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
+ CaseClusterIt First,
+ CaseClusterIt Last) {
+ return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
+ if (X.Weight != CC.Weight)
+ return X.Weight > CC.Weight;
+
+ // Ties are broken by comparing the case value.
+ return X.Low->getValue().slt(CC.Low->getValue());
+ });
+}
+
void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
const SwitchWorkListItem &W,
Value *Cond,
@@ -7988,6 +8037,48 @@ void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
RightWeight += (--FirstRight)->Weight;
I++;
}
+
+ for (;;) {
+ // Our binary search tree differs from a typical BST in that ours can have up
+ // to three values in each leaf. The pivot selection above doesn't take that
+ // into account, which means the tree might require more nodes and be less
+ // efficient. We compensate for this here.
+
+ unsigned NumLeft = LastLeft - W.FirstCluster + 1;
+ unsigned NumRight = W.LastCluster - FirstRight + 1;
+
+ if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
+ // If one side has fewer than 3 clusters, and the other has more than 3,
+ // consider taking a cluster from the other side.
+
+ if (NumLeft < NumRight) {
+ // Consider moving the first cluster on the right to the left side.
+ CaseCluster &CC = *FirstRight;
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ if (LeftSideRank <= RightSideRank) {
+ // Moving the cluster to the left does not demote it.
+ ++LastLeft;
+ ++FirstRight;
+ continue;
+ }
+ } else {
+ assert(NumRight < NumLeft);
+ // Consider moving the last element on the left to the right side.
+ CaseCluster &CC = *LastLeft;
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ if (RightSideRank <= LeftSideRank) {
+ // Moving the cluster to the right does not demote it.
+ --LastLeft;
+ --FirstRight;
+ continue;
+ }
+ }
+ }
+ break;
+ }
+
assert(LastLeft + 1 == FirstRight);
assert(LastLeft >= W.FirstCluster);
assert(FirstRight <= W.LastCluster);
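caseClusterRank makes the move-across-the-pivot test above cheap to state: a cluster's rank in a range is the number of clusters that outrank it by weight (ties broken by case value), so a move is allowed only when it does not demote the cluster on the receiving side. A standalone sketch with a simplified CaseCluster stand-in:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct CaseCluster { int Low; unsigned Weight; }; // simplified stand-in

    // Rank of CC within Range: how many clusters are heavier, with ties
    // broken by the case value, mirroring caseClusterRank above.
    unsigned rank(const CaseCluster &CC, const std::vector<CaseCluster> &Range) {
      return std::count_if(Range.begin(), Range.end(),
                           [&](const CaseCluster &X) {
        if (X.Weight != CC.Weight)
          return X.Weight > CC.Weight;
        return X.Low < CC.Low;
      });
    }

    int main() {
      std::vector<CaseCluster> Left = {{1, 10}, {2, 8}};
      std::vector<CaseCluster> Right = {{5, 20}, {6, 5}, {7, 5}};
      const CaseCluster &CC = Right.front(); // candidate to move left
      // Rank 0 on both sides: CC stays the heaviest cluster after the move,
      // so shifting the pivot does not demote it and the loop above proceeds.
      std::printf("left=%u right=%u\n", rank(CC, Left), rank(CC, Right));
    }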
@@ -8111,11 +8202,8 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
return;
}
- if (TM.getOptLevel() != CodeGenOpt::None) {
- findJumpTables(Clusters, &SI, DefaultMBB);
- findBitTestClusters(Clusters, &SI);
- }
-
+ findJumpTables(Clusters, &SI, DefaultMBB);
+ findBitTestClusters(Clusters, &SI);
DEBUG({
dbgs() << "Case clusters: ";
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index f0c03af3f64b..f225d54d189d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -342,6 +342,11 @@ private:
};
typedef SmallVector<SwitchWorkListItem, 4> SwitchWorkList;
+ /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
+ /// than each cluster in the range, its rank is 0.
+ static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
+ CaseClusterIt Last);
+
/// Emit comparison and split W into two subtrees.
void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
Value *Cond, MachineBasicBlock *SwitchMBB);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 22f592afae71..c5562cd31067 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -307,7 +307,7 @@ namespace llvm {
"Unknown sched type!");
return createILPListDAGScheduler(IS, OptLevel);
}
-}
+} // namespace llvm
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
@@ -938,8 +938,10 @@ bool SelectionDAGISel::PrepareEHLandingPad() {
// pad into several BBs.
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const LandingPadInst *LPadInst = LLVMBB->getLandingPadInst();
- MF->getMMI().addPersonality(
- MBB, cast<Function>(LPadInst->getPersonalityFn()->stripPointerCasts()));
+ MF->getMMI().addPersonality(MBB, cast<Function>(LPadInst->getParent()
+ ->getParent()
+ ->getPersonalityFn()
+ ->stripPointerCasts()));
EHPersonality Personality = MF->getMMI().getPersonalityType();
if (isMSVCEHPersonality(Personality)) {
@@ -2540,7 +2542,7 @@ public:
J.setNode(E);
}
};
-}
+} // namespace
SDNode *SelectionDAGISel::
SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 4df5ede388fc..19b5d160c8a9 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -132,7 +132,7 @@ namespace llvm {
"color=blue,style=dashed");
}
};
-}
+} // namespace llvm
std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
const SelectionDAG *G) {
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 8bbfa01e7594..a6b3fc6c4d4a 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -113,84 +113,137 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
llvm_unreachable("infinite loop?");
}
+/// Utility function for reservePreviousStackSlotForValue. Tries to find the
+/// stack slot index to which the value was spilled for previous statepoints.
+/// LookUpDepth caps the DFS depth this function is allowed to search.
+static Optional<int> findPreviousSpillSlot(const Value *Val,
+ SelectionDAGBuilder &Builder,
+ int LookUpDepth) {
+ // Cannot look any further - give up now
+ if (LookUpDepth <= 0)
+ return Optional<int>();
+
+ // Spill location is known for gc relocates
+ if (isGCRelocate(Val)) {
+ GCRelocateOperands RelocOps(cast<Instruction>(Val));
+
+ FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
+ Builder.FuncInfo.StatepointRelocatedValues[RelocOps.getStatepoint()];
+
+ auto It = SpillMap.find(RelocOps.getDerivedPtr());
+ if (It == SpillMap.end())
+ return Optional<int>();
+
+ return It->second;
+ }
+
+ // Look through bitcast instructions.
+ if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val)) {
+ return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
+ }
+
+ // Look through phi nodes:
+ // All incoming values should have the same known stack slot, otherwise
+ // the result is unknown.
+ if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
+ Optional<int> MergedResult = None;
+
+ for (auto &IncomingValue : Phi->incoming_values()) {
+ Optional<int> SpillSlot =
+ findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
+ if (!SpillSlot.hasValue())
+ return Optional<int>();
+
+ if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
+ return Optional<int>();
+
+ MergedResult = SpillSlot;
+ }
+ return MergedResult;
+ }
+
+ // TODO: We can do better for PHI nodes. In cases like this:
+ //   ptr = phi(relocated_pointer, not_relocated_pointer)
+ //   statepoint(ptr)
+ // We will return that the stack slot for ptr is unknown, and later we
+ // might assign different stack slots for ptr and relocated_pointer. This
+ // limits llvm's ability to remove redundant stores.
+ // Unfortunately this is hard to accomplish in the current infrastructure:
+ // we use this function to eliminate the spill store completely, whereas
+ // in the example above we would still need to emit the store, just to a
+ // particular "preferred" location instead of an arbitrary one.
+
+ // TODO: handle simple updates. If a value is modified and the original
+ // value is no longer live, it would be nice to put the modified value in the
+ // same slot. This allows folding of the memory accesses for some
+ // instruction types (like an increment).
+ // statepoint (i)
+ // i1 = i+1
+ // statepoint (i1)
+ // However we need to be careful for cases like this:
+ // statepoint(i)
+ // i1 = i+1
+ // statepoint(i, i1)
+ // Here we want to reserve a spill slot for 'i', but not for 'i+1'. If we
+ // just put handling of simple modifications in this function, as is done
+ // for bitcasts, we might end up reserving i's slot for 'i+1' because the
+ // order in which we visit values is unspecified.
+
+ // We don't know anything about this instruction.
+ return Optional<int>();
+}
+
/// Try to find existing copies of the incoming values in stack slots used for
/// statepoint spilling. If we can find a spill slot for the incoming value,
/// mark that slot as allocated, and reuse the same slot for this safepoint.
/// This helps to avoid a series of loads and stores that only serve to
/// reshuffle values on the stack between calls.
-static void reservePreviousStackSlotForValue(SDValue Incoming,
+static void reservePreviousStackSlotForValue(const Value *IncomingValue,
SelectionDAGBuilder &Builder) {
+ SDValue Incoming = Builder.getValue(IncomingValue);
+
if (isa<ConstantSDNode>(Incoming) || isa<FrameIndexSDNode>(Incoming)) {
// We won't need to spill this, so no need to check for previously
// allocated stack slots
return;
}
- SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);
- if (Loc.getNode()) {
+ SDValue OldLocation = Builder.StatepointLowering.getLocation(Incoming);
+ if (OldLocation.getNode())
// duplicates in input
return;
- }
-
- // Search back for the load from a stack slot pattern to find the original
- // slot we allocated for this value. We could extend this to deal with
- // simple modification patterns, but simple dealing with trivial load/store
- // sequences helps a lot already.
- if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Incoming)) {
- if (auto *FI = dyn_cast<FrameIndexSDNode>(Load->getBasePtr())) {
- const int Index = FI->getIndex();
- auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
- Builder.FuncInfo.StatepointStackSlots.end(), Index);
- if (Itr == Builder.FuncInfo.StatepointStackSlots.end()) {
- // not one of the lowering stack slots, can't reuse!
- // TODO: Actually, we probably could reuse the stack slot if the value
- // hasn't changed at all, but we'd need to look for intervening writes
- return;
- } else {
- // This is one of our dedicated lowering slots
- const int Offset =
- std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
- if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
- // stack slot already assigned to someone else, can't use it!
- // TODO: currently we reserve space for gc arguments after doing
- // normal allocation for deopt arguments. We should reserve for
- // _all_ deopt and gc arguments, then start allocating. This
- // will prevent some moves being inserted when vm state changes,
- // but gc state doesn't between two calls.
- return;
- }
- // Reserve this stack slot
- Builder.StatepointLowering.reserveStackSlot(Offset);
- }
- // Cache this slot so we find it when going through the normal
- // assignment loop.
- SDValue Loc =
- Builder.DAG.getTargetFrameIndex(Index, Incoming.getValueType());
+ const int LookUpDepth = 6;
+ Optional<int> Index =
+ findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
+ if (!Index.hasValue())
+ return;
- Builder.StatepointLowering.setLocation(Incoming, Loc);
- }
+ auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
+ Builder.FuncInfo.StatepointStackSlots.end(), *Index);
+ assert(Itr != Builder.FuncInfo.StatepointStackSlots.end() &&
+ "value spilled to the unknown stack slot");
+
+ // This is one of our dedicated lowering slots
+ const int Offset =
+ std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
+ if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
+ // stack slot already assigned to someone else, can't use it!
+ // TODO: currently we reserve space for gc arguments after doing
+ // normal allocation for deopt arguments. We should reserve for
+ // _all_ deopt and gc arguments, then start allocating. This
+ // will prevent some moves being inserted when the vm state changes
+ // between two calls but the gc state doesn't.
+ return;
}
+ // Reserve this stack slot
+ Builder.StatepointLowering.reserveStackSlot(Offset);
- // TODO: handle case where a reloaded value flows through a phi to
- // another safepoint. e.g.
- // bb1:
- // a' = relocated...
- // bb2: % pred: bb1, bb3, bb4, etc.
- // a_phi = phi(a', ...)
- // statepoint ... a_phi
- // NOTE: This will require reasoning about cross basic block values. This is
- // decidedly non trivial and this might not be the right place to do it. We
- // don't really have the information we need here...
-
- // TODO: handle simple updates. If a value is modified and the original
- // value is no longer live, it would be nice to put the modified value in the
- // same slot. This allows folding of the memory accesses for some
- // instructions types (like an increment).
- // statepoint (i)
- // i1 = i+1
- // statepoint (i1)
+ // Cache this slot so we find it when going through the normal
+ // assignment loop.
+ SDValue Loc = Builder.DAG.getTargetFrameIndex(*Index, Incoming.getValueType());
+ Builder.StatepointLowering.setLocation(Incoming, Loc);
}
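The rewrite above replaces the old pattern match on a load-from-frame-index with a depth-limited walk over the IR value itself: gc.relocates report their known slot, bitcasts are looked through, and a phi has a known slot only if every incoming value agrees on one. A minimal sketch of that merge rule, using std::optional in place of llvm's Optional<int>:

    #include <cstdio>
    #include <optional>
    #include <vector>

    // A phi's previous spill slot is known only if every incoming value has
    // a known slot and all of them agree, as in findPreviousSpillSlot above.
    std::optional<int> mergeIncoming(const std::vector<std::optional<int>> &In) {
      std::optional<int> Merged;
      for (const auto &Slot : In) {
        if (!Slot)
          return std::nullopt; // any unknown incoming value -> unknown
        if (Merged && *Merged != *Slot)
          return std::nullopt; // conflicting slots -> unknown
        Merged = Slot;
      }
      return Merged;
    }

    int main() {
      auto A = mergeIncoming({3, 3, 3}); // all agree on slot 3
      auto B = mergeIncoming({3, 4});    // conflict -> no reuse
      std::printf("A known=%d B known=%d\n", A.has_value(), B.has_value());
    }

The depth cap (LookUpDepth = 6 above) keeps the recursion bounded on long bitcast/phi chains.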
/// Remove any duplicate (as SDValues) from the derived pointer pairs. This
@@ -319,8 +372,7 @@ static void getIncomingStatepointGCValues(
SmallVectorImpl<const Value *> &Bases, SmallVectorImpl<const Value *> &Ptrs,
SmallVectorImpl<const Value *> &Relocs, ImmutableStatepoint StatepointSite,
SelectionDAGBuilder &Builder) {
- for (GCRelocateOperands relocateOpers :
- StatepointSite.getRelocates(StatepointSite)) {
+ for (GCRelocateOperands relocateOpers : StatepointSite.getRelocates()) {
Relocs.push_back(relocateOpers.getUnderlyingCallSite().getInstruction());
Bases.push_back(relocateOpers.getBasePtr());
Ptrs.push_back(relocateOpers.getDerivedPtr());
@@ -458,15 +510,11 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// doesn't change semantics at all. It is important for performance that we
// reserve slots for both deopt and gc values before lowering either.
for (const Value *V : StatepointSite.vm_state_args()) {
- SDValue Incoming = Builder.getValue(V);
- reservePreviousStackSlotForValue(Incoming, Builder);
+ reservePreviousStackSlotForValue(V, Builder);
}
for (unsigned i = 0; i < Bases.size(); ++i) {
- const Value *Base = Bases[i];
- reservePreviousStackSlotForValue(Builder.getValue(Base), Builder);
-
- const Value *Ptr = Ptrs[i];
- reservePreviousStackSlotForValue(Builder.getValue(Ptr), Builder);
+ reservePreviousStackSlotForValue(Bases[i], Builder);
+ reservePreviousStackSlotForValue(Ptrs[i], Builder);
}
// First, prefix the list with the number of unique values to be
@@ -524,8 +572,7 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
Builder.FuncInfo.StatepointRelocatedValues[StatepointInstr];
- for (GCRelocateOperands RelocateOpers :
- StatepointSite.getRelocates(StatepointSite)) {
+ for (GCRelocateOperands RelocateOpers : StatepointSite.getRelocates()) {
const Value *V = RelocateOpers.getDerivedPtr();
SDValue SDV = Builder.getValue(V);
SDValue Loc = Builder.StatepointLowering.getLocation(SDV);
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 9daf2a50ad8f..c70c3a270403 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2671,8 +2671,9 @@ SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
// TODO: For UDIV use SRL instead of SRA.
SDValue Amt =
DAG.getConstant(ShAmt, dl, getShiftAmountTy(Op1.getValueType()));
- Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, false, false,
- true);
+ SDNodeFlags Flags;
+ Flags.setExact(true);
+ Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
d = d.ashr(ShAmt);
}
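The TargetLowering change is the same flags migration: the exact bit now travels on SDNodeFlags instead of a positional boolean. That exact flag is what licenses the shift: when the dividend is known divisible, BuildExactSDIV divides by first shifting out the power-of-two factor and then multiplying by the odd factor's multiplicative inverse mod 2^32 (the half of the transform outside this hunk). A standalone worked example of the arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Multiplicative inverse of an odd number mod 2^32 via Newton-Raphson;
    // each iteration doubles the number of correct low bits.
    uint32_t inverseMod2_32(uint32_t Odd) {
      uint32_t X = Odd; // Odd*Odd == 1 (mod 8), so 3 bits start correct
      for (int i = 0; i < 5; ++i)
        X *= 2 - Odd * X; // 3 -> 6 -> 12 -> 24 -> 48 -> 96 correct bits
      return X;
    }

    int main() {
      int32_t V = -120;         // known to be exactly divisible by 24 = 2^3 * 3
      int32_t Shifted = V >> 3; // the exact SRA emitted above
      int32_t Q = int32_t(uint32_t(Shifted) * inverseMod2_32(3));
      std::printf("%d / 24 = %d\n", V, Q); // prints -120 / 24 = -5
    }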
diff --git a/lib/CodeGen/ShadowStackGCLowering.cpp b/lib/CodeGen/ShadowStackGCLowering.cpp
index 7c0b2bb45698..d60e5f9ba099 100644
--- a/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -59,7 +59,7 @@ private:
Type *Ty, Value *BasePtr, int Idx1, int Idx2,
const char *Name);
};
-}
+} // namespace
INITIALIZE_PASS_BEGIN(ShadowStackGCLowering, "shadow-stack-gc-lowering",
"Shadow Stack GC Lowering", false, false)
@@ -144,10 +144,14 @@ public:
BasicBlock *CleanupBB = BasicBlock::Create(C, CleanupBBName, &F);
Type *ExnTy =
StructType::get(Type::getInt8PtrTy(C), Type::getInt32Ty(C), nullptr);
- Constant *PersFn = F.getParent()->getOrInsertFunction(
- "__gcc_personality_v0", FunctionType::get(Type::getInt32Ty(C), true));
+ if (!F.hasPersonalityFn()) {
+ Constant *PersFn = F.getParent()->getOrInsertFunction(
+ "__gcc_personality_v0",
+ FunctionType::get(Type::getInt32Ty(C), true));
+ F.setPersonalityFn(PersFn);
+ }
LandingPadInst *LPad =
- LandingPadInst::Create(ExnTy, PersFn, 1, "cleanup.lpad", CleanupBB);
+ LandingPadInst::Create(ExnTy, 1, "cleanup.lpad", CleanupBB);
LPad->setCleanup(true);
ResumeInst *RI = ResumeInst::Create(LPad, CleanupBB);
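This hunk tracks the IR change that moved the personality function from each landingpad onto the enclosing Function: the pass installs __gcc_personality_v0 on the function once if it has none, and LandingPadInst::Create no longer takes a personality operand. A condensed sketch of the new shape, using the same calls as the hunk (pass plumbing omitted):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    static LandingPadInst *makeCleanupLPad(Function &F, Type *ExnTy,
                                           BasicBlock *CleanupBB) {
      // The personality now lives on the Function; set it once if absent.
      if (!F.hasPersonalityFn()) {
        Constant *PersFn = F.getParent()->getOrInsertFunction(
            "__gcc_personality_v0",
            FunctionType::get(Type::getInt32Ty(F.getContext()), true));
        F.setPersonalityFn(PersFn);
      }
      // The landing pad carries only its result type and clause count.
      LandingPadInst *LPad =
          LandingPadInst::Create(ExnTy, 1, "cleanup.lpad", CleanupBB);
      LPad->setCleanup(true);
      return LPad;
    }

The same Function-level accessor shows up in the SjLjEHPrepare and WinEHPrepare hunks below, where F.getPersonalityFn() replaces LPads[0]->getPersonalityFn().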
@@ -185,7 +189,7 @@ public:
}
}
};
-}
+} // namespace
Constant *ShadowStackGCLowering::GetFrameMap(Function &F) {
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 42d277ebed0f..116fd5be0337 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -227,7 +227,7 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
// Personality function
IRBuilder<> Builder(EntryBB->getTerminator());
if (!PersonalityFn)
- PersonalityFn = LPads[0]->getPersonalityFn();
+ PersonalityFn = F.getPersonalityFn();
Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(
FunctionContextTy, FuncCtx, 0, 3, "pers_fn_gep");
Builder.CreateStore(
diff --git a/lib/CodeGen/Spiller.h b/lib/CodeGen/Spiller.h
index 08f99ec78adc..b1019c1affd7 100644
--- a/lib/CodeGen/Spiller.h
+++ b/lib/CodeGen/Spiller.h
@@ -37,6 +37,6 @@ namespace llvm {
MachineFunction &mf,
VirtRegMap &vrm);
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SplitKit.h b/lib/CodeGen/SplitKit.h
index a0627634a822..4eaf03ef2e63 100644
--- a/lib/CodeGen/SplitKit.h
+++ b/lib/CodeGen/SplitKit.h
@@ -466,6 +466,6 @@ public:
unsigned IntvOut, SlotIndex EnterAfter);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/StatepointExampleGC.cpp b/lib/CodeGen/StatepointExampleGC.cpp
index 95dfd75018c1..b9523e55b0c3 100644
--- a/lib/CodeGen/StatepointExampleGC.cpp
+++ b/lib/CodeGen/StatepointExampleGC.cpp
@@ -45,7 +45,7 @@ public:
return (1 == PT->getAddressSpace());
}
};
-}
+} // namespace
static GCRegistry::Add<StatepointGC> X("statepoint-example",
"an example strategy for statepoint");
diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp
index 23f41c8dd4bd..164badd29381 100644
--- a/lib/CodeGen/TailDuplication.cpp
+++ b/lib/CodeGen/TailDuplication.cpp
@@ -125,7 +125,7 @@ namespace {
};
char TailDuplicatePass::ID = 0;
-}
+} // namespace
char &llvm::TailDuplicateID = TailDuplicatePass::ID;
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index c809087d3da4..97ca0253d376 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -219,9 +220,8 @@ TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-
-bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+bool TargetInstrInfo::PredicateInstruction(
+ MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
bool MadeChange = false;
assert(!MI->isBundle() &&
@@ -802,9 +802,10 @@ getInstrLatency(const InstrItineraryData *ItinData,
return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
-bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
+bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
unsigned DefIdx) const {
+ const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
diff --git a/lib/CodeGen/UnreachableBlockElim.cpp b/lib/CodeGen/UnreachableBlockElim.cpp
index d393e103104d..5c54cdbc1d5f 100644
--- a/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/lib/CodeGen/UnreachableBlockElim.cpp
@@ -51,7 +51,7 @@ namespace {
AU.addPreserved<DominatorTreeWrapperPass>();
}
};
-}
+} // namespace
char UnreachableBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableBlockElim, "unreachableblockelim",
"Remove unreachable blocks from the CFG", false, false)
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 32d5100f8495..2912bdd63426 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -167,6 +167,7 @@ class VirtRegRewriter : public MachineFunctionPass {
void rewrite();
void addMBBLiveIns();
+ bool readsUndefSubreg(const MachineOperand &MO) const;
public:
static char ID;
VirtRegRewriter() : MachineFunctionPass(ID) {}
@@ -288,6 +289,31 @@ void VirtRegRewriter::addMBBLiveIns() {
MBB.sortUniqueLiveIns();
}
+/// Returns true if the given machine operand \p MO only reads undefined lanes.
+/// The function only works for use operands with a subregister set.
+bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
+ // Shortcut if the operand is already marked undef.
+ if (MO.isUndef())
+ return true;
+
+ unsigned Reg = MO.getReg();
+ const LiveInterval &LI = LIS->getInterval(Reg);
+ const MachineInstr &MI = *MO.getParent();
+ SlotIndex BaseIndex = LIS->getInstructionIndex(&MI);
+ // This code is only meant to handle reading undefined subregisters which
+ // we couldn't properly detect before.
+ assert(LI.liveAt(BaseIndex) &&
+ "Reads of completely dead register should be marked undef already");
+ unsigned SubRegIdx = MO.getSubReg();
+ unsigned UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
+ // See if any of the relevant subregister live ranges is defined at this point.
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+ if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
+ return false;
+ }
+ return true;
+}
+
void VirtRegRewriter::rewrite() {
bool NoSubRegLiveness = !MRI->subRegLivenessEnabled();
SmallVector<unsigned, 8> SuperDeads;
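The readsUndefSubreg helper added above reduces to a lane-mask intersection: the operand's subregister index selects a set of lanes, and the read is undef exactly when no live subrange covers any of those lanes at the instruction's slot index. A standalone sketch with liveness reduced to a boolean per subrange:

    #include <cstdio>
    #include <vector>

    struct SubRange {
      unsigned LaneMask; // lanes this subrange describes
      bool LiveHere;     // live at the use's slot index?
    };

    // Mirrors readsUndefSubreg: the read is defined iff some live subrange
    // overlaps the lanes the operand actually reads.
    bool readsUndefLanes(unsigned UseMask, const std::vector<SubRange> &SRs) {
      for (const SubRange &SR : SRs)
        if ((SR.LaneMask & UseMask) != 0 && SR.LiveHere)
          return false; // at least one read lane is defined here
      return true;      // every lane the operand reads is undefined
    }

    int main() {
      // Low half (mask 0x1) is live, high half (mask 0x2) is dead.
      std::vector<SubRange> SRs = {{0x1, true}, {0x2, false}};
      std::printf("lo undef=%d hi undef=%d\n",
                  readsUndefLanes(0x1, SRs), readsUndefLanes(0x2, SRs));
      // Prints: lo undef=0 hi undef=1
    }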
@@ -367,32 +393,51 @@ void VirtRegRewriter::rewrite() {
assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
- if (MO.getSubReg()) {
- // A virtual register kill refers to the whole register, so we may
- // have to add <imp-use,kill> operands for the super-register. A
- // partial redef always kills and redefines the super-register.
- if (NoSubRegLiveness && MO.readsReg()
- && (MO.isDef() || MO.isKill()))
- SuperKills.push_back(PhysReg);
-
- if (MO.isDef()) {
- // The <def,undef> flag only makes sense for sub-register defs, and
- // we are substituting a full physreg. An <imp-use,kill> operand
- // from the SuperKills list will represent the partial read of the
- // super-register.
- MO.setIsUndef(false);
-
- // Also add implicit defs for the super-register.
- if (NoSubRegLiveness) {
+ unsigned SubReg = MO.getSubReg();
+ if (SubReg != 0) {
+ if (NoSubRegLiveness) {
+ // A virtual register kill refers to the whole register, so we may
+ // have to add <imp-use,kill> operands for the super-register. A
+ // partial redef always kills and redefines the super-register.
+ if (MO.readsReg() && (MO.isDef() || MO.isKill()))
+ SuperKills.push_back(PhysReg);
+
+ if (MO.isDef()) {
+ // Also add implicit defs for the super-register.
if (MO.isDead())
SuperDeads.push_back(PhysReg);
else
SuperDefs.push_back(PhysReg);
}
+ } else {
+ if (MO.isUse()) {
+ if (readsUndefSubreg(MO))
+ // We need to add an <undef> flag if the subregister is
+ // completely undefined (and we are not adding super-register
+ // defs).
+ MO.setIsUndef(true);
+ } else if (!MO.isDead()) {
+ assert(MO.isDef());
+ // Things get tricky when we run out of lane mask bits and
+ // merge multiple lanes into the overflow bit: in this case
+ // our subregister liveness tracking isn't precise and we can't
+ // know which subregister parts are undefined, so fall back
+ // to an implicit super-register def.
+ unsigned LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
+ if (TargetRegisterInfo::isImpreciseLaneMask(LaneMask))
+ SuperDefs.push_back(PhysReg);
+ }
}
+ // The <def,undef> flag only makes sense for sub-register defs, and
+ // we are substituting a full physreg. An <imp-use,kill> operand
+ // from the SuperKills list will represent the partial read of the
+ // super-register.
+ if (MO.isDef())
+ MO.setIsUndef(false);
+
// PhysReg operands cannot have subregister indexes.
- PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
+ PhysReg = TRI->getSubReg(PhysReg, SubReg);
assert(PhysReg && "Invalid SubReg for physical register");
MO.setSubReg(0);
}
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index c2b3d84ca363..8c932cfc6b37 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -24,6 +24,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
@@ -110,7 +111,7 @@ private:
bool outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo);
- void addStubInvokeToHandlerIfNeeded(Function *Handler, Value *PersonalityFn);
+ void addStubInvokeToHandlerIfNeeded(Function *Handler);
void mapLandingPadBlocks(LandingPadInst *LPad, LandingPadActions &Actions);
CatchHandler *findCatchHandler(BasicBlock *BB, BasicBlock *&NextBB,
@@ -124,6 +125,7 @@ private:
// All fields are reset by runOnFunction.
DominatorTree *DT = nullptr;
+ const TargetLibraryInfo *LibInfo = nullptr;
EHPersonality Personality = EHPersonality::Unknown;
CatchHandlerMapTy CatchHandlerMap;
CleanupHandlerMapTy CleanupHandlerMap;
@@ -377,13 +379,14 @@ bool WinEHPrepare::runOnFunction(Function &Fn) {
return false;
// Classify the personality to see what kind of preparation we need.
- Personality = classifyEHPersonality(LPads.back()->getPersonalityFn());
+ Personality = classifyEHPersonality(Fn.getPersonalityFn());
// Do nothing if this is not an MSVC personality.
if (!isMSVCEHPersonality(Personality))
return false;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If there were any landing pads, prepareExceptionHandlers will make changes.
prepareExceptionHandlers(Fn, LPads);
@@ -394,6 +397,7 @@ bool WinEHPrepare::doFinalization(Module &M) { return false; }
void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
@@ -1016,10 +1020,17 @@ bool WinEHPrepare::prepareExceptionHandlers(
Builder.CreateCall(FrameEscapeFn, AllocasToEscape);
if (SEHExceptionCodeSlot) {
- if (SEHExceptionCodeSlot->hasNUses(0))
- SEHExceptionCodeSlot->eraseFromParent();
- else if (isAllocaPromotable(SEHExceptionCodeSlot))
+ if (isAllocaPromotable(SEHExceptionCodeSlot)) {
+ SmallPtrSet<BasicBlock *, 4> UserBlocks;
+ for (User *U : SEHExceptionCodeSlot->users()) {
+ if (auto *Inst = dyn_cast<Instruction>(U))
+ UserBlocks.insert(Inst->getParent());
+ }
PromoteMemToReg(SEHExceptionCodeSlot, *DT);
+ // After the promotion, kill off dead instructions.
+ for (BasicBlock *BB : UserBlocks)
+ SimplifyInstructionsInBlock(BB, LibInfo);
+ }
}
// Clean up the handler action maps we created for this function
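The SEH slot handling above changes from "erase if unused, else promote" to "promote whenever promotable, then re-simplify the blocks that used the slot", which also sweeps up instructions mem2reg leaves trivially dead. Condensed into one helper using the same LLVM utility calls as the hunk (pass plumbing omitted):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/Casting.h"
    #include "llvm/Transforms/Utils/Local.h"
    #include "llvm/Transforms/Utils/PromoteMemToReg.h"

    using namespace llvm;

    static void promoteAndClean(AllocaInst *Slot, DominatorTree &DT,
                                const TargetLibraryInfo *TLI) {
      if (!isAllocaPromotable(Slot))
        return;
      // Record the blocks touching the slot before mem2reg rewrites them.
      SmallPtrSet<BasicBlock *, 4> UserBlocks;
      for (User *U : Slot->users())
        if (auto *Inst = dyn_cast<Instruction>(U))
          UserBlocks.insert(Inst->getParent());
      PromoteMemToReg(Slot, DT);
      // mem2reg can leave dead loads and casts behind; fold them away.
      for (BasicBlock *BB : UserBlocks)
        SimplifyInstructionsInBlock(BB, TLI);
    }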
@@ -1029,6 +1040,7 @@ bool WinEHPrepare::prepareExceptionHandlers(
CleanupHandlerMap.clear();
HandlerToParentFP.clear();
DT = nullptr;
+ LibInfo = nullptr;
SEHExceptionCodeSlot = nullptr;
EHBlocks.clear();
NormalBlocks.clear();
@@ -1143,7 +1155,6 @@ void WinEHPrepare::completeNestedLandingPad(Function *ParentFn,
++II;
// The instruction after the landing pad should now be a call to eh.actions.
const Instruction *Recover = II;
- assert(match(Recover, m_Intrinsic<Intrinsic::eh_actions>()));
const IntrinsicInst *EHActions = cast<IntrinsicInst>(Recover);
// Remap the return target in the nested handler.
@@ -1254,8 +1265,7 @@ static bool isCatchBlock(BasicBlock *BB) {
return false;
}
-static BasicBlock *createStubLandingPad(Function *Handler,
- Value *PersonalityFn) {
+static BasicBlock *createStubLandingPad(Function *Handler) {
// FIXME: Finish this!
LLVMContext &Context = Handler->getContext();
BasicBlock *StubBB = BasicBlock::Create(Context, "stub");
@@ -1264,7 +1274,7 @@ static BasicBlock *createStubLandingPad(Function *Handler,
LandingPadInst *LPad = Builder.CreateLandingPad(
llvm::StructType::get(Type::getInt8PtrTy(Context),
Type::getInt32Ty(Context), nullptr),
- PersonalityFn, 0);
+ 0);
// Insert a call to llvm.eh.actions so that we don't try to outline this lpad.
Function *ActionIntrin =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::eh_actions);
@@ -1279,8 +1289,7 @@ static BasicBlock *createStubLandingPad(Function *Handler,
// landing pad if none is found. The code that generates the .xdata tables for
// the handler needs at least one landing pad to identify the parent function's
// personality.
-void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler,
- Value *PersonalityFn) {
+void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler) {
ReturnInst *Ret = nullptr;
UnreachableInst *Unreached = nullptr;
for (BasicBlock &BB : *Handler) {
@@ -1312,7 +1321,7 @@ void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler,
// parent block. We want to replace that with an invoke call, so we can
// erase it now.
OldRetBB->getTerminator()->eraseFromParent();
- BasicBlock *StubLandingPad = createStubLandingPad(Handler, PersonalityFn);
+ BasicBlock *StubLandingPad = createStubLandingPad(Handler);
Function *F =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::donothing);
InvokeInst::Create(F, NewRetBB, StubLandingPad, None, "", OldRetBB);
@@ -1368,6 +1377,7 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
Handler = createHandlerFunc(Type::getVoidTy(Context),
SrcFn->getName() + ".cleanup", M, ParentFP);
}
+ Handler->setPersonalityFn(SrcFn->getPersonalityFn());
HandlerToParentFP[Handler] = ParentFP;
Handler->addFnAttr("wineh-parent", SrcFn->getName());
BasicBlock *Entry = &Handler->getEntryBlock();
@@ -1445,7 +1455,7 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
ClonedEntryBB->eraseFromParent();
// Make sure we can identify the handler's personality later.
- addStubInvokeToHandlerIfNeeded(Handler, LPad->getPersonalityFn());
+ addStubInvokeToHandlerIfNeeded(Handler);
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
WinEHCatchDirector *CatchDirector =
@@ -2286,7 +2296,7 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
// value for this block but the value is a nullptr. This means that
// we have previously analyzed the block and determined that it did
// not contain any cleanup code. Based on the earlier analysis, we
- // know the the block must end in either an unconditional branch, a
+ // know the block must end in either an unconditional branch, a
// resume or a conditional branch that is predicated on a comparison
// with a selector. Either the resume or the selector dispatch
// would terminate the search for cleanup code, so the unconditional
@@ -2454,6 +2464,8 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
void llvm::parseEHActions(
const IntrinsicInst *II,
SmallVectorImpl<std::unique_ptr<ActionHandler>> &Actions) {
+ assert(II->getIntrinsicID() == Intrinsic::eh_actions &&
+ "attempted to parse non eh.actions intrinsic");
for (unsigned I = 0, E = II->getNumArgOperands(); I != E;) {
uint64_t ActionKind =
cast<ConstantInt>(II->getArgOperand(I))->getZExtValue();
@@ -2506,7 +2518,7 @@ struct WinEHNumbering {
void calculateStateNumbers(const Function &F);
void findActionRootLPads(const Function &F);
};
-}
+} // namespace
void WinEHNumbering::createUnwindMapEntry(int ToState, ActionHandler *AH) {
WinEHUnwindMapEntry UME;
@@ -2766,7 +2778,6 @@ void WinEHNumbering::calculateStateNumbers(const Function &F) {
auto *ActionsCall = dyn_cast<IntrinsicInst>(LPI->getNextNode());
if (!ActionsCall)
continue;
- assert(ActionsCall->getIntrinsicID() == Intrinsic::eh_actions);
parseEHActions(ActionsCall, ActionList);
if (ActionList.empty())
continue;