Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp  1486
1 files changed, 843 insertions, 643 deletions
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp
index cf3eaba23bee..6e548d4a93c8 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -56,6 +56,10 @@ STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumCoalesced, "Number of copies coalesced");
+// FIXME: Remove this switch when all testcases are fixed!
+static cl::opt<bool> IgnoreMissingDefs("rafast-ignore-missing-defs",
+ cl::Hidden);
+
static RegisterRegAlloc
fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
@@ -85,8 +89,9 @@ namespace {
MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
Register VirtReg; ///< Virtual register number.
MCPhysReg PhysReg = 0; ///< Currently held here.
- unsigned short LastOpNum = 0; ///< OpNum on LastUse.
- bool Dirty = false; ///< Register needs spill.
+ bool LiveOut = false; ///< Register is possibly live out.
+ bool Reloaded = false; ///< Register was reloaded.
+ bool Error = false; ///< Could not allocate.
explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}
@@ -100,44 +105,51 @@ namespace {
/// available in a physical register.
LiveRegMap LiveVirtRegs;
+ /// Stores assigned virtual registers present in the bundle MI.
+ DenseMap<Register, MCPhysReg> BundleVirtRegsMap;
+
DenseMap<unsigned, SmallVector<MachineInstr *, 2>> LiveDbgValueMap;
+ /// List of DBG_VALUE that we encountered without the vreg being assigned
+ /// because they were placed after the last use of the vreg.
+ DenseMap<unsigned, SmallVector<MachineInstr *, 1>> DanglingDbgValues;
/// Has a bit set for every virtual register for which it was determined
/// that it is alive across blocks.
BitVector MayLiveAcrossBlocks;
- /// State of a physical register.
- enum RegState {
- /// A disabled register is not available for allocation, but an alias may
- /// be in use. A register can only be moved out of the disabled state if
- /// all aliases are disabled.
- regDisabled,
-
+ /// State of a register unit.
+ enum RegUnitState {
/// A free register is not currently in use and can be allocated
/// immediately without checking aliases.
regFree,
- /// A reserved register has been assigned explicitly (e.g., setting up a
- /// call parameter), and it remains reserved until it is used.
- regReserved
+ /// A pre-assigned register has been assigned before register allocation
+ /// (e.g., setting up a call parameter).
+ regPreAssigned,
+
+ /// Used temporarily in reloadAtBegin() to mark register units that are
+ /// live-in to the basic block.
+ regLiveIn,
/// A register state may also be a virtual register number, indicating
/// that the physical register is currently allocated to a virtual
/// register. In that case, LiveVirtRegs contains the inverse mapping.
};
- /// Maps each physical register to a RegState enum or a virtual register.
- std::vector<unsigned> PhysRegState;
+ /// Maps each physical register to a RegUnitState enum or virtual register.
+ std::vector<unsigned> RegUnitStates;
- SmallVector<Register, 16> VirtDead;
SmallVector<MachineInstr *, 32> Coalesced;
using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
/// Set of register units that are used in the current instruction, and so
/// cannot be allocated.
RegUnitSet UsedInInstr;
+ RegUnitSet PhysRegUses;
+ SmallVector<uint16_t, 8> DefOperandIndexes;
void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);
+ bool isPhysRegFree(MCPhysReg PhysReg) const;
/// Mark a physreg as used in this instruction.
void markRegUsedInInstr(MCPhysReg PhysReg) {
@@ -146,13 +158,29 @@ namespace {
}
/// Check if a physreg or any of its aliases are used in this instruction.
- bool isRegUsedInInstr(MCPhysReg PhysReg) const {
- for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ bool isRegUsedInInstr(MCPhysReg PhysReg, bool LookAtPhysRegUses) const {
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
if (UsedInInstr.count(*Units))
return true;
+ if (LookAtPhysRegUses && PhysRegUses.count(*Units))
+ return true;
+ }
return false;
}
+ /// Mark physical register as being used in a register use operand.
+ /// This is only used by the special livethrough handling code.
+ void markPhysRegUsedInInstr(MCPhysReg PhysReg) {
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ PhysRegUses.insert(*Units);
+ }
+
+ /// Remove mark of physical register being used in the instruction.
+ void unmarkRegUsedInInstr(MCPhysReg PhysReg) {
+ for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ UsedInInstr.erase(*Units);
+ }
+
enum : unsigned {
spillClean = 50,
spillDirty = 100,
@@ -178,27 +206,29 @@ namespace {
MachineFunctionProperties::Property::NoVRegs);
}
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
+
private:
bool runOnMachineFunction(MachineFunction &MF) override;
void allocateBasicBlock(MachineBasicBlock &MBB);
+
+ void addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
+ Register Reg) const;
+
void allocateInstruction(MachineInstr &MI);
void handleDebugValue(MachineInstr &MI);
- void handleThroughOperands(MachineInstr &MI,
- SmallVectorImpl<Register> &VirtDead);
- bool isLastUseOfLocalReg(const MachineOperand &MO) const;
-
- void addKillFlag(const LiveReg &LRI);
- void killVirtReg(LiveReg &LR);
- void killVirtReg(Register VirtReg);
- void spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR);
- void spillVirtReg(MachineBasicBlock::iterator MI, Register VirtReg);
-
- void usePhysReg(MachineOperand &MO);
- void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg,
- RegState NewState);
+ void handleBundle(MachineInstr &MI);
+
+ bool usePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
+ bool definePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
+ bool displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg);
+ void freePhysReg(MCPhysReg PhysReg);
+
unsigned calcSpillCost(MCPhysReg PhysReg) const;
- void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);
LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
@@ -208,28 +238,38 @@ namespace {
return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
}
- void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint);
+ void assignVirtToPhysReg(MachineInstr &MI, LiveReg &, MCPhysReg PhysReg);
+ void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint,
+ bool LookAtPhysRegUses = false);
void allocVirtRegUndef(MachineOperand &MO);
- MCPhysReg defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
- Register Hint);
- LiveReg &reloadVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
- Register Hint);
- void spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut);
- bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
+ void assignDanglingDebugValues(MachineInstr &Def, Register VirtReg,
+ MCPhysReg Reg);
+ void defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
+ Register VirtReg);
+ void defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
+ bool LookAtPhysRegUses = false);
+ void useVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg);
+
+ MachineBasicBlock::iterator
+ getMBBBeginInsertionPoint(MachineBasicBlock &MBB,
+ SmallSet<Register, 2> &PrologLiveIns) const;
+
+ void reloadAtBegin(MachineBasicBlock &MBB);
+ void setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);
Register traceCopies(Register VirtReg) const;
Register traceCopyChain(Register Reg) const;
int getStackSpaceFor(Register VirtReg);
void spill(MachineBasicBlock::iterator Before, Register VirtReg,
- MCPhysReg AssignedReg, bool Kill);
+ MCPhysReg AssignedReg, bool Kill, bool LiveOut);
void reload(MachineBasicBlock::iterator Before, Register VirtReg,
MCPhysReg PhysReg);
bool mayLiveOut(Register VirtReg);
bool mayLiveIn(Register VirtReg);
- void dumpState();
+ void dumpState() const;
};
} // end anonymous namespace
@@ -240,7 +280,16 @@ INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
false)
void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
- PhysRegState[PhysReg] = NewState;
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
+ RegUnitStates[*UI] = NewState;
+}
+
+bool RegAllocFast::isPhysRegFree(MCPhysReg PhysReg) const {
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+ if (RegUnitStates[*UI] != regFree)
+ return false;
+ }
+ return true;
}
/// This allocates space for the specified virtual register to be held on the
@@ -263,6 +312,20 @@ int RegAllocFast::getStackSpaceFor(Register VirtReg) {
return FrameIdx;
}
+static bool dominates(MachineBasicBlock &MBB,
+ MachineBasicBlock::const_iterator A,
+ MachineBasicBlock::const_iterator B) {
+ auto MBBEnd = MBB.end();
+ if (B == MBBEnd)
+ return true;
+
+ MachineBasicBlock::const_iterator I = MBB.begin();
+ for (; &*I != A && &*I != B; ++I)
+ ;
+
+ return &*I == A;
+}
+
/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFast::mayLiveOut(Register VirtReg) {
if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
@@ -270,23 +333,38 @@ bool RegAllocFast::mayLiveOut(Register VirtReg) {
return !MBB->succ_empty();
}
- // If this block loops back to itself, it would be necessary to check whether
- // the use comes after the def.
+ const MachineInstr *SelfLoopDef = nullptr;
+
+ // If this block loops back to itself, it is necessary to check whether the
+ // use comes after the def.
if (MBB->isSuccessor(MBB)) {
- MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
- return true;
+ SelfLoopDef = MRI->getUniqueVRegDef(VirtReg);
+ if (!SelfLoopDef) {
+ MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+ return true;
+ }
}
// See if the first \p Limit uses of the register are all in the current
// block.
static const unsigned Limit = 8;
unsigned C = 0;
- for (const MachineInstr &UseInst : MRI->reg_nodbg_instructions(VirtReg)) {
+ for (const MachineInstr &UseInst : MRI->use_nodbg_instructions(VirtReg)) {
if (UseInst.getParent() != MBB || ++C >= Limit) {
MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
// Cannot be live-out if there are no successors.
return !MBB->succ_empty();
}
+
+ if (SelfLoopDef) {
+ // Try to handle some simple cases to avoid spilling and reloading every
+ // value inside a self looping block.
+ if (SelfLoopDef == &UseInst ||
+ !dominates(*MBB, SelfLoopDef->getIterator(), UseInst.getIterator())) {
+ MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
+ return true;
+ }
+ }
}
return false;
@@ -313,7 +391,7 @@ bool RegAllocFast::mayLiveIn(Register VirtReg) {
/// Insert spill instruction for \p AssignedReg before \p Before. Update
/// DBG_VALUEs with \p VirtReg operands with the stack slot.
void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
- MCPhysReg AssignedReg, bool Kill) {
+ MCPhysReg AssignedReg, bool Kill, bool LiveOut) {
LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI)
<< " in " << printReg(AssignedReg, TRI));
int FI = getStackSpaceFor(VirtReg);
@@ -323,15 +401,32 @@ void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI);
++NumStores;
- // If this register is used by DBG_VALUE then insert new DBG_VALUE to
- // identify spilled location as the place to find corresponding variable's
- // value.
+ MachineBasicBlock::iterator FirstTerm = MBB->getFirstTerminator();
+
+ // When we spill a virtual register, we will have spill instructions behind
+ // every definition of it, meaning we can switch all the DBG_VALUEs over
+ // to just reference the stack slot.
SmallVectorImpl<MachineInstr *> &LRIDbgValues = LiveDbgValueMap[VirtReg];
for (MachineInstr *DBG : LRIDbgValues) {
MachineInstr *NewDV = buildDbgValueForSpill(*MBB, Before, *DBG, FI);
assert(NewDV->getParent() == MBB && "dangling parent pointer");
(void)NewDV;
LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
+
+ if (LiveOut) {
+ // We need to insert a DBG_VALUE at the end of the block if the spill slot
+ // is live out, but there is another use of the value after the
+ // spill. This will allow LiveDebugValues to see the correct live out
+ // value to propagate to the successors.
+ MachineInstr *ClonedDV = MBB->getParent()->CloneMachineInstr(NewDV);
+ MBB->insert(FirstTerm, ClonedDV);
+ LLVM_DEBUG(dbgs() << "Cloning debug info due to live out spill\n");
+ }
+
+ // Rewrite unassigned dbg_values to use the stack slot.
+ MachineOperand &MO = DBG->getOperand(0);
+ if (MO.isReg() && MO.getReg() == 0)
+ updateDbgValueForSpill(*DBG, FI);
}
// Now that this register is spilled, there should not be any DBG_VALUE
// pointing to this register because they are all pointing to the spilled value
@@ -350,100 +445,75 @@ void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
++NumLoads;
}
-/// Return true if MO is the only remaining reference to its virtual register,
-/// and it is guaranteed to be a block-local register.
-bool RegAllocFast::isLastUseOfLocalReg(const MachineOperand &MO) const {
- // If the register has ever been spilled or reloaded, we conservatively assume
- // it is a global register used in multiple blocks.
- if (StackSlotForVirtReg[MO.getReg()] != -1)
- return false;
-
- // Check that the use/def chain has exactly one operand - MO.
- MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
- if (&*I != &MO)
- return false;
- return ++I == MRI->reg_nodbg_end();
-}
+/// Get basic block begin insertion point.
+/// This is not just MBB.begin() because surprisingly we have EH_LABEL
+/// instructions marking the beginning of a basic block. This means we must insert
+/// new instructions after such labels...
+MachineBasicBlock::iterator
+RegAllocFast::getMBBBeginInsertionPoint(
+ MachineBasicBlock &MBB, SmallSet<Register, 2> &PrologLiveIns) const {
+ MachineBasicBlock::iterator I = MBB.begin();
+ while (I != MBB.end()) {
+ if (I->isLabel()) {
+ ++I;
+ continue;
+ }
-/// Set kill flags on last use of a virtual register.
-void RegAllocFast::addKillFlag(const LiveReg &LR) {
- if (!LR.LastUse) return;
- MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum);
- if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
- if (MO.getReg() == LR.PhysReg)
- MO.setIsKill();
- // else, don't do anything we are problably redefining a
- // subreg of this register and given we don't track which
- // lanes are actually dead, we cannot insert a kill flag here.
- // Otherwise we may end up in a situation like this:
- // ... = (MO) physreg:sub1, implicit killed physreg
- // ... <== Here we would allow later pass to reuse physreg:sub1
- // which is potentially wrong.
- // LR:sub0 = ...
- // ... = LR.sub1 <== This is going to use physreg:sub1
- }
-}
+ // Most reloads should be inserted after prolog instructions.
+ if (!TII->isBasicBlockPrologue(*I))
+ break;
-/// Mark virtreg as no longer available.
-void RegAllocFast::killVirtReg(LiveReg &LR) {
- addKillFlag(LR);
- assert(PhysRegState[LR.PhysReg] == LR.VirtReg &&
- "Broken RegState mapping");
- setPhysRegState(LR.PhysReg, regFree);
- LR.PhysReg = 0;
-}
+ // However if a prolog instruction reads a register that needs to be
+ // reloaded, the reload should be inserted before the prolog.
+ for (MachineOperand &MO : I->operands()) {
+ if (MO.isReg())
+ PrologLiveIns.insert(MO.getReg());
+ }
-/// Mark virtreg as no longer available.
-void RegAllocFast::killVirtReg(Register VirtReg) {
- assert(Register::isVirtualRegister(VirtReg) &&
- "killVirtReg needs a virtual register");
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- if (LRI != LiveVirtRegs.end() && LRI->PhysReg)
- killVirtReg(*LRI);
-}
+ ++I;
+ }
-/// This method spills the value specified by VirtReg into the corresponding
-/// stack slot if needed.
-void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
- Register VirtReg) {
- assert(Register::isVirtualRegister(VirtReg) &&
- "Spilling a physical register is illegal!");
- LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
- "Spilling unmapped virtual register");
- spillVirtReg(MI, *LRI);
+ return I;
}
-/// Do the actual work of spilling.
-void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR) {
- assert(PhysRegState[LR.PhysReg] == LR.VirtReg && "Broken RegState mapping");
+/// Reload all currently assigned virtual registers.
+void RegAllocFast::reloadAtBegin(MachineBasicBlock &MBB) {
+ if (LiveVirtRegs.empty())
+ return;
- if (LR.Dirty) {
- // If this physreg is used by the instruction, we want to kill it on the
- // instruction, not on the spill.
- bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
- LR.Dirty = false;
+ for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
+ MCPhysReg Reg = P.PhysReg;
+ // Set state to live-in. This possibly overrides mappings to virtual
+ // registers but we don't care anymore at this point.
+ setPhysRegState(Reg, regLiveIn);
+ }
- spill(MI, LR.VirtReg, LR.PhysReg, SpillKill);
- if (SpillKill)
- LR.LastUse = nullptr; // Don't kill register again
- }
- killVirtReg(LR);
-}
+ SmallSet<Register, 2> PrologLiveIns;
-/// Spill all dirty virtregs without killing them.
-void RegAllocFast::spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut) {
- if (LiveVirtRegs.empty())
- return;
// The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
// of spilling here is deterministic, if arbitrary.
- for (LiveReg &LR : LiveVirtRegs) {
- if (!LR.PhysReg)
+ MachineBasicBlock::iterator InsertBefore
+ = getMBBBeginInsertionPoint(MBB, PrologLiveIns);
+ for (const LiveReg &LR : LiveVirtRegs) {
+ MCPhysReg PhysReg = LR.PhysReg;
+ if (PhysReg == 0)
continue;
- if (OnlyLiveOut && !mayLiveOut(LR.VirtReg))
+
+ MCRegister FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
+ if (RegUnitStates[FirstUnit] == regLiveIn)
continue;
- spillVirtReg(MI, LR);
+
+ assert((&MBB != &MBB.getParent()->front() || IgnoreMissingDefs) &&
+ "no reload in start block. Missing vreg def?");
+
+ if (PrologLiveIns.count(PhysReg)) {
+ // FIXME: Theoretically this should use an insert point skipping labels
+ // but I'm not sure how labels should interact with prolog instructions
+ // that need reloads.
+ reload(MBB.begin(), LR.VirtReg, PhysReg);
+ } else
+ reload(InsertBefore, LR.VirtReg, PhysReg);
}
LiveVirtRegs.clear();
}
@@ -451,105 +521,73 @@ void RegAllocFast::spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut) {
/// Handle the direct use of a physical register. Check that the register is
/// not used by a virtreg. Kill the physreg, marking it free. This may add
/// implicit kills to MO->getParent() and invalidate MO.
-void RegAllocFast::usePhysReg(MachineOperand &MO) {
- // Ignore undef uses.
- if (MO.isUndef())
- return;
+bool RegAllocFast::usePhysReg(MachineInstr &MI, MCPhysReg Reg) {
+ assert(Register::isPhysicalRegister(Reg) && "expected physreg");
+ bool displacedAny = displacePhysReg(MI, Reg);
+ setPhysRegState(Reg, regPreAssigned);
+ markRegUsedInInstr(Reg);
+ return displacedAny;
+}
- Register PhysReg = MO.getReg();
- assert(PhysReg.isPhysical() && "Bad usePhysReg operand");
+bool RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg Reg) {
+ bool displacedAny = displacePhysReg(MI, Reg);
+ setPhysRegState(Reg, regPreAssigned);
+ return displacedAny;
+}
- markRegUsedInInstr(PhysReg);
- switch (PhysRegState[PhysReg]) {
- case regDisabled:
- break;
- case regReserved:
- PhysRegState[PhysReg] = regFree;
- LLVM_FALLTHROUGH;
- case regFree:
- MO.setIsKill();
- return;
- default:
- // The physreg was allocated to a virtual register. That means the value we
- // wanted has been clobbered.
- llvm_unreachable("Instruction uses an allocated register");
- }
+/// Mark PhysReg as free, displacing any virtual register or pre-assignment
+/// currently occupying its register units. Displaced virtregs are reloaded
+/// after \p MI. Returns true if anything was displaced.
+bool RegAllocFast::displacePhysReg(MachineInstr &MI, MCPhysReg PhysReg) {
+ bool displacedAny = false;
- // Maybe a superregister is reserved?
- for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
- MCPhysReg Alias = *AI;
- switch (PhysRegState[Alias]) {
- case regDisabled:
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+ unsigned Unit = *UI;
+ switch (unsigned VirtReg = RegUnitStates[Unit]) {
+ default: {
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && "datastructures in sync");
+ MachineBasicBlock::iterator ReloadBefore =
+ std::next((MachineBasicBlock::iterator)MI.getIterator());
+ reload(ReloadBefore, VirtReg, LRI->PhysReg);
+
+ setPhysRegState(LRI->PhysReg, regFree);
+ LRI->PhysReg = 0;
+ LRI->Reloaded = true;
+ displacedAny = true;
+ break;
+ }
+ case regPreAssigned:
+ RegUnitStates[Unit] = regFree;
+ displacedAny = true;
break;
- case regReserved:
- // Either PhysReg is a subregister of Alias and we mark the
- // whole register as free, or PhysReg is the superregister of
- // Alias and we mark all the aliases as disabled before freeing
- // PhysReg.
- // In the latter case, since PhysReg was disabled, this means that
- // its value is defined only by physical sub-registers. This check
- // is performed by the assert of the default case in this loop.
- // Note: The value of the superregister may only be partial
- // defined, that is why regDisabled is a valid state for aliases.
- assert((TRI->isSuperRegister(PhysReg, Alias) ||
- TRI->isSuperRegister(Alias, PhysReg)) &&
- "Instruction is not using a subregister of a reserved register");
- LLVM_FALLTHROUGH;
case regFree:
- if (TRI->isSuperRegister(PhysReg, Alias)) {
- // Leave the superregister in the working set.
- setPhysRegState(Alias, regFree);
- MO.getParent()->addRegisterKilled(Alias, TRI, true);
- return;
- }
- // Some other alias was in the working set - clear it.
- setPhysRegState(Alias, regDisabled);
break;
- default:
- llvm_unreachable("Instruction uses an alias of an allocated register");
}
}
-
- // All aliases are disabled, bring register into working set.
- setPhysRegState(PhysReg, regFree);
- MO.setIsKill();
+ return displacedAny;
}
-/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
-/// similar to defineVirtReg except the physreg is reserved instead of
-/// allocated.
-void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
- MCPhysReg PhysReg, RegState NewState) {
- markRegUsedInInstr(PhysReg);
- switch (Register VirtReg = PhysRegState[PhysReg]) {
- case regDisabled:
- break;
- default:
- spillVirtReg(MI, VirtReg);
- LLVM_FALLTHROUGH;
+void RegAllocFast::freePhysReg(MCPhysReg PhysReg) {
+ LLVM_DEBUG(dbgs() << "Freeing " << printReg(PhysReg, TRI) << ':');
+
+ MCRegister FirstUnit = *MCRegUnitIterator(PhysReg, TRI);
+ switch (unsigned VirtReg = RegUnitStates[FirstUnit]) {
case regFree:
- case regReserved:
- setPhysRegState(PhysReg, NewState);
+ LLVM_DEBUG(dbgs() << '\n');
return;
- }
-
- // This is a disabled register, disable all aliases.
- setPhysRegState(PhysReg, NewState);
- for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
- MCPhysReg Alias = *AI;
- switch (Register VirtReg = PhysRegState[Alias]) {
- case regDisabled:
- break;
- default:
- spillVirtReg(MI, VirtReg);
- LLVM_FALLTHROUGH;
- case regFree:
- case regReserved:
- setPhysRegState(Alias, regDisabled);
- if (TRI->isSuperRegister(PhysReg, Alias))
- return;
- break;
+ case regPreAssigned:
+ LLVM_DEBUG(dbgs() << '\n');
+ setPhysRegState(PhysReg, regFree);
+ return;
+ default: {
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ assert(LRI != LiveVirtRegs.end());
+ LLVM_DEBUG(dbgs() << ' ' << printReg(LRI->VirtReg, TRI) << '\n');
+ setPhysRegState(LRI->PhysReg, regFree);
+ LRI->PhysReg = 0;
}
+ return;
}
}
@@ -558,57 +596,61 @@ void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
/// disabled - it can be allocated directly.
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
- if (isRegUsedInInstr(PhysReg)) {
- LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI)
- << " is already used in instr.\n");
- return spillImpossible;
- }
- switch (Register VirtReg = PhysRegState[PhysReg]) {
- case regDisabled:
- break;
- case regFree:
- return 0;
- case regReserved:
- LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
- << printReg(PhysReg, TRI) << " is reserved already.\n");
- return spillImpossible;
- default: {
- LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
- "Missing VirtReg entry");
- return LRI->Dirty ? spillDirty : spillClean;
- }
- }
-
- // This is a disabled register, add up cost of aliases.
- LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is disabled.\n");
- unsigned Cost = 0;
- for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) {
- MCPhysReg Alias = *AI;
- switch (Register VirtReg = PhysRegState[Alias]) {
- case regDisabled:
- break;
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+ switch (unsigned VirtReg = RegUnitStates[*UI]) {
case regFree:
- ++Cost;
break;
- case regReserved:
+ case regPreAssigned:
+ LLVM_DEBUG(dbgs() << "Cannot spill pre-assigned "
+ << printReg(PhysReg, TRI) << '\n');
return spillImpossible;
default: {
- LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
- assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
- "Missing VirtReg entry");
- Cost += LRI->Dirty ? spillDirty : spillClean;
- break;
+ bool SureSpill = StackSlotForVirtReg[VirtReg] != -1 ||
+ findLiveVirtReg(VirtReg)->LiveOut;
+ return SureSpill ? spillClean : spillDirty;
}
}
}
- return Cost;
+ return 0;
+}
+
+void RegAllocFast::assignDanglingDebugValues(MachineInstr &Definition,
+ Register VirtReg, MCPhysReg Reg) {
+ auto UDBGValIter = DanglingDbgValues.find(VirtReg);
+ if (UDBGValIter == DanglingDbgValues.end())
+ return;
+
+ SmallVectorImpl<MachineInstr*> &Dangling = UDBGValIter->second;
+ for (MachineInstr *DbgValue : Dangling) {
+ assert(DbgValue->isDebugValue());
+ MachineOperand &MO = DbgValue->getOperand(0);
+ if (!MO.isReg())
+ continue;
+
+ // Test whether the physreg survives from the definition to the DBG_VALUE.
+ MCPhysReg SetToReg = Reg;
+ unsigned Limit = 20;
+ for (MachineBasicBlock::iterator I = std::next(Definition.getIterator()),
+ E = DbgValue->getIterator(); I != E; ++I) {
+ if (I->modifiesRegister(Reg, TRI) || --Limit == 0) {
+ LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
+ << '\n');
+ SetToReg = 0;
+ break;
+ }
+ }
+ MO.setReg(SetToReg);
+ if (SetToReg != 0)
+ MO.setIsRenamable();
+ }
+ Dangling.clear();
}
/// This method updates local state so that we know that PhysReg is the
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
-void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
+void RegAllocFast::assignVirtToPhysReg(MachineInstr &AtMI, LiveReg &LR,
+ MCPhysReg PhysReg) {
Register VirtReg = LR.VirtReg;
LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
<< printReg(PhysReg, TRI) << '\n');
@@ -616,6 +658,8 @@ void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
assert(PhysReg != 0 && "Trying to assign no register");
LR.PhysReg = PhysReg;
setPhysRegState(PhysReg, VirtReg);
+
+ assignDanglingDebugValues(AtMI, VirtReg, PhysReg);
}
static bool isCoalescable(const MachineInstr &MI) {
@@ -659,11 +703,10 @@ Register RegAllocFast::traceCopies(Register VirtReg) const {
}
/// Allocates a physical register for VirtReg.
-void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
+void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR,
+ Register Hint0, bool LookAtPhysRegUses) {
const Register VirtReg = LR.VirtReg;
-
- assert(Register::isVirtualRegister(VirtReg) &&
- "Can only allocate virtual registers");
+ assert(LR.PhysReg == 0);
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
@@ -671,41 +714,36 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
<< " with hint " << printReg(Hint0, TRI) << '\n');
// Take hint when possible.
- if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) &&
- RC.contains(Hint0)) {
- // Ignore the hint if we would have to spill a dirty register.
- unsigned Cost = calcSpillCost(Hint0);
- if (Cost < spillDirty) {
+ if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) && RC.contains(Hint0) &&
+ !isRegUsedInInstr(Hint0, LookAtPhysRegUses)) {
+ // Take hint if the register is currently free.
+ if (isPhysRegFree(Hint0)) {
LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
<< '\n');
- if (Cost)
- definePhysReg(MI, Hint0, regFree);
- assignVirtToPhysReg(LR, Hint0);
+ assignVirtToPhysReg(MI, LR, Hint0);
return;
} else {
- LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
- << "occupied\n");
+ LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint0, TRI)
+ << " occupied\n");
}
} else {
Hint0 = Register();
}
+
// Try other hint.
Register Hint1 = traceCopies(VirtReg);
- if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) &&
- RC.contains(Hint1) && !isRegUsedInInstr(Hint1)) {
- // Ignore the hint if we would have to spill a dirty register.
- unsigned Cost = calcSpillCost(Hint1);
- if (Cost < spillDirty) {
+ if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) && RC.contains(Hint1) &&
+ !isRegUsedInInstr(Hint1, LookAtPhysRegUses)) {
+ // Take hint if the register is currently free.
+ if (isPhysRegFree(Hint1)) {
LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
- << '\n');
- if (Cost)
- definePhysReg(MI, Hint1, regFree);
- assignVirtToPhysReg(LR, Hint1);
+ << '\n');
+ assignVirtToPhysReg(MI, LR, Hint1);
return;
} else {
- LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
- << "occupied\n");
+ LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint1, TRI)
+ << " occupied\n");
}
} else {
Hint1 = Register();
@@ -716,15 +754,20 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
for (MCPhysReg PhysReg : AllocationOrder) {
LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
+ if (isRegUsedInInstr(PhysReg, LookAtPhysRegUses)) {
+ LLVM_DEBUG(dbgs() << "already used in instr.\n");
+ continue;
+ }
+
unsigned Cost = calcSpillCost(PhysReg);
LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
// Immediately take a register with cost 0.
if (Cost == 0) {
- assignVirtToPhysReg(LR, PhysReg);
+ assignVirtToPhysReg(MI, LR, PhysReg);
return;
}
- if (PhysReg == Hint1 || PhysReg == Hint0)
+ if (PhysReg == Hint0 || PhysReg == Hint1)
Cost -= spillPrefBonus;
if (Cost < BestCost) {
@@ -740,13 +783,14 @@ void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
MI.emitError("inline assembly requires more registers than available");
else
MI.emitError("ran out of registers during register allocation");
- definePhysReg(MI, *AllocationOrder.begin(), regFree);
- assignVirtToPhysReg(LR, *AllocationOrder.begin());
+
+ LR.Error = true;
+ LR.PhysReg = 0;
return;
}
- definePhysReg(MI, BestReg, regFree);
- assignVirtToPhysReg(LR, BestReg);
+ displacePhysReg(MI, BestReg);
+ assignVirtToPhysReg(MI, LR, BestReg);
}
void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
@@ -774,347 +818,491 @@ void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
MO.setIsRenamable(true);
}
-/// Allocates a register for VirtReg and mark it as dirty.
-MCPhysReg RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
- Register VirtReg, Register Hint) {
- assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
+/// Variation of defineVirtReg() with special handling for livethrough regs
+/// (tied or earlyclobber) that may interfere with preassigned uses.
+void RegAllocFast::defineLiveThroughVirtReg(MachineInstr &MI, unsigned OpNum,
+ Register VirtReg) {
+ LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
+ if (LRI != LiveVirtRegs.end()) {
+ MCPhysReg PrevReg = LRI->PhysReg;
+ if (PrevReg != 0 && isRegUsedInInstr(PrevReg, true)) {
+ LLVM_DEBUG(dbgs() << "Need new assignment for " << printReg(PrevReg, TRI)
+ << " (tied/earlyclobber resolution)\n");
+ freePhysReg(PrevReg);
+ LRI->PhysReg = 0;
+ allocVirtReg(MI, *LRI, 0, true);
+ MachineBasicBlock::iterator InsertBefore =
+ std::next((MachineBasicBlock::iterator)MI.getIterator());
+ LLVM_DEBUG(dbgs() << "Copy " << printReg(LRI->PhysReg, TRI) << " to "
+ << printReg(PrevReg, TRI) << '\n');
+ BuildMI(*MBB, InsertBefore, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY), PrevReg)
+ .addReg(LRI->PhysReg, llvm::RegState::Kill);
+ }
+ MachineOperand &MO = MI.getOperand(OpNum);
+ if (MO.getSubReg() && !MO.isUndef()) {
+ LRI->LastUse = &MI;
+ }
+ }
+ return defineVirtReg(MI, OpNum, VirtReg, true);
+}
+
+/// Allocates a register for VirtReg definition. Typically the register is
+/// already assigned from a use of the virtreg, however we still need to
+/// perform an allocation if:
+/// - It is a dead definition without any uses.
+/// - The value is live out and all uses are in different basic blocks.
+void RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
+ Register VirtReg, bool LookAtPhysRegUses) {
+ assert(VirtReg.isVirtual() && "Not a virtual register");
+ MachineOperand &MO = MI.getOperand(OpNum);
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
- if (!LRI->PhysReg) {
- // If there is no hint, peek at the only use of this register.
- if ((!Hint || !Hint.isPhysical()) &&
- MRI->hasOneNonDBGUse(VirtReg)) {
- const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
- // It's a copy, use the destination register as a hint.
- if (UseMI.isCopyLike())
- Hint = UseMI.getOperand(0).getReg();
+ if (New) {
+ if (!MO.isDead()) {
+ if (mayLiveOut(VirtReg)) {
+ LRI->LiveOut = true;
+ } else {
+ // It is a dead def without the dead flag; add the flag now.
+ MO.setIsDead(true);
+ }
}
- allocVirtReg(MI, *LRI, Hint);
- } else if (LRI->LastUse) {
- // Redefining a live register - kill at the last use, unless it is this
- // instruction defining VirtReg multiple times.
- if (LRI->LastUse != &MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse())
- addKillFlag(*LRI);
}
- assert(LRI->PhysReg && "Register not assigned");
- LRI->LastUse = &MI;
- LRI->LastOpNum = OpNum;
- LRI->Dirty = true;
- markRegUsedInInstr(LRI->PhysReg);
- return LRI->PhysReg;
+ if (LRI->PhysReg == 0)
+ allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
+ else {
+ assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
+ "TODO: preassign mismatch");
+ LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
+ << " use existing assignment to "
+ << printReg(LRI->PhysReg, TRI) << '\n');
+ }
+
+ MCPhysReg PhysReg = LRI->PhysReg;
+ assert(PhysReg != 0 && "Register not assigned");
+ if (LRI->Reloaded || LRI->LiveOut) {
+ if (!MI.isImplicitDef()) {
+ MachineBasicBlock::iterator SpillBefore =
+ std::next((MachineBasicBlock::iterator)MI.getIterator());
+ LLVM_DEBUG(dbgs() << "Spill Reason: LO: " << LRI->LiveOut << " RL: "
+ << LRI->Reloaded << '\n');
+ bool Kill = LRI->LastUse == nullptr;
+ spill(SpillBefore, VirtReg, PhysReg, Kill, LRI->LiveOut);
+ LRI->LastUse = nullptr;
+ }
+ LRI->LiveOut = false;
+ LRI->Reloaded = false;
+ }
+ if (MI.getOpcode() == TargetOpcode::BUNDLE) {
+ BundleVirtRegsMap[VirtReg] = PhysReg;
+ }
+ markRegUsedInInstr(PhysReg);
+ setPhysReg(MI, MO, PhysReg);
}
-/// Make sure VirtReg is available in a physreg and return it.
-RegAllocFast::LiveReg &RegAllocFast::reloadVirtReg(MachineInstr &MI,
- unsigned OpNum,
- Register VirtReg,
- Register Hint) {
- assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
+/// Allocates a register for a VirtReg use.
+void RegAllocFast::useVirtReg(MachineInstr &MI, unsigned OpNum,
+ Register VirtReg) {
+ assert(VirtReg.isVirtual() && "Not a virtual register");
+ MachineOperand &MO = MI.getOperand(OpNum);
LiveRegMap::iterator LRI;
bool New;
std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
- MachineOperand &MO = MI.getOperand(OpNum);
- if (!LRI->PhysReg) {
- allocVirtReg(MI, *LRI, Hint);
- reload(MI, VirtReg, LRI->PhysReg);
- } else if (LRI->Dirty) {
- if (isLastUseOfLocalReg(MO)) {
- LLVM_DEBUG(dbgs() << "Killing last use: " << MO << '\n');
- if (MO.isUse())
- MO.setIsKill();
- else
- MO.setIsDead();
- } else if (MO.isKill()) {
- LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << '\n');
- MO.setIsKill(false);
- } else if (MO.isDead()) {
- LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << '\n');
- MO.setIsDead(false);
+ if (New) {
+ MachineOperand &MO = MI.getOperand(OpNum);
+ if (!MO.isKill()) {
+ if (mayLiveOut(VirtReg)) {
+ LRI->LiveOut = true;
+ } else {
+ // It is a last (killing) use without the kill flag; add the flag now.
+ MO.setIsKill(true);
+ }
}
- } else if (MO.isKill()) {
- // We must remove kill flags from uses of reloaded registers because the
- // register would be killed immediately, and there might be a second use:
- // %foo = OR killed %x, %x
- // This would cause a second reload of %x into a different register.
- LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << '\n');
- MO.setIsKill(false);
- } else if (MO.isDead()) {
- LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << '\n');
- MO.setIsDead(false);
+ } else {
+ assert((!MO.isKill() || LRI->LastUse == &MI) && "Invalid kill flag");
}
- assert(LRI->PhysReg && "Register not assigned");
+
+ // If necessary allocate a register.
+ if (LRI->PhysReg == 0) {
+ assert(!MO.isTied() && "tied op should be allocated");
+ Register Hint;
+ if (MI.isCopy() && MI.getOperand(1).getSubReg() == 0) {
+ Hint = MI.getOperand(0).getReg();
+ assert(Hint.isPhysical() &&
+ "Copy destination should already be assigned");
+ }
+ allocVirtReg(MI, *LRI, Hint, false);
+ if (LRI->Error) {
+ const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+ ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
+ setPhysReg(MI, MO, *AllocationOrder.begin());
+ return;
+ }
+ }
+
LRI->LastUse = &MI;
- LRI->LastOpNum = OpNum;
+
+ if (MI.getOpcode() == TargetOpcode::BUNDLE) {
+ BundleVirtRegsMap[VirtReg] = LRI->PhysReg;
+ }
markRegUsedInInstr(LRI->PhysReg);
- return *LRI;
+ setPhysReg(MI, MO, LRI->PhysReg);
}
/// Changes operand OpNum in MI to refer to PhysReg, considering subregs.
/// This may invalidate any operand pointers.
-bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
+void RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
MCPhysReg PhysReg) {
- bool Dead = MO.isDead();
if (!MO.getSubReg()) {
MO.setReg(PhysReg);
MO.setIsRenamable(true);
- return MO.isKill() || Dead;
+ return;
}
// Handle subregister index.
- MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : Register());
+ MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : MCRegister());
MO.setIsRenamable(true);
- MO.setSubReg(0);
+ // Note: We leave the subreg number around a little longer in case of defs.
+ // This is so that the register freeing logic in allocateInstruction can still
+ // recognize this as a subregister def. The code there will clear the number.
+ if (!MO.isDef())
+ MO.setSubReg(0);
// A kill flag implies killing the full register. Add corresponding super
// register kill.
if (MO.isKill()) {
MI.addRegisterKilled(PhysReg, TRI, true);
- return true;
+ return;
}
// A <def,read-undef> of a sub-register requires an implicit def of the full
// register.
- if (MO.isDef() && MO.isUndef())
- MI.addRegisterDefined(PhysReg, TRI);
-
- return Dead;
-}
-
-// Handles special instruction operand like early clobbers and tied ops when
-// there are additional physreg defines.
-void RegAllocFast::handleThroughOperands(MachineInstr &MI,
- SmallVectorImpl<Register> &VirtDead) {
- LLVM_DEBUG(dbgs() << "Scanning for through registers:");
- SmallSet<Register, 8> ThroughRegs;
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg()) continue;
- Register Reg = MO.getReg();
- if (!Reg.isVirtual())
- continue;
- if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
- (MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
- if (ThroughRegs.insert(Reg).second)
- LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
- }
- }
-
- // If any physreg defines collide with preallocated through registers,
- // we must spill and reallocate.
- LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg() || !MO.isDef()) continue;
- Register Reg = MO.getReg();
- if (!Reg || !Reg.isPhysical())
- continue;
- markRegUsedInInstr(Reg);
- for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
- if (ThroughRegs.count(PhysRegState[*AI]))
- definePhysReg(MI, *AI, regFree);
- }
- }
-
- SmallVector<Register, 8> PartialDefs;
- LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
- for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
- MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg()) continue;
- Register Reg = MO.getReg();
- if (!Register::isVirtualRegister(Reg))
- continue;
- if (MO.isUse()) {
- if (!MO.isTied()) continue;
- LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
- << ") is tied to operand " << MI.findTiedOperandIdx(I)
- << ".\n");
- LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
- MCPhysReg PhysReg = LR.PhysReg;
- setPhysReg(MI, MO, PhysReg);
- // Note: we don't update the def operand yet. That would cause the normal
- // def-scan to attempt spilling.
- } else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
- LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << '\n');
- // Reload the register, but don't assign to the operand just yet.
- // That would confuse the later phys-def processing pass.
- LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
- PartialDefs.push_back(LR.PhysReg);
- }
- }
-
- LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n");
- for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg()) continue;
- Register Reg = MO.getReg();
- if (!Register::isVirtualRegister(Reg))
- continue;
- if (!MO.isEarlyClobber())
- continue;
- // Note: defineVirtReg may invalidate MO.
- MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, 0);
- if (setPhysReg(MI, MI.getOperand(I), PhysReg))
- VirtDead.push_back(Reg);
- }
-
- // Restore UsedInInstr to a state usable for allocating normal virtual uses.
- UsedInInstr.clear();
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
- Register Reg = MO.getReg();
- if (!Reg || !Reg.isPhysical())
- continue;
- LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
- << " as used in instr\n");
- markRegUsedInInstr(Reg);
+ if (MO.isDef() && MO.isUndef()) {
+ if (MO.isDead())
+ MI.addRegisterDead(PhysReg, TRI, true);
+ else
+ MI.addRegisterDefined(PhysReg, TRI);
}
-
- // Also mark PartialDefs as used to avoid reallocation.
- for (Register PartialDef : PartialDefs)
- markRegUsedInInstr(PartialDef);
}
#ifndef NDEBUG
-void RegAllocFast::dumpState() {
- for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
- if (PhysRegState[Reg] == regDisabled) continue;
- dbgs() << " " << printReg(Reg, TRI);
- switch(PhysRegState[Reg]) {
+
+void RegAllocFast::dumpState() const {
+ for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
+ ++Unit) {
+ switch (unsigned VirtReg = RegUnitStates[Unit]) {
case regFree:
break;
- case regReserved:
- dbgs() << "*";
+ case regPreAssigned:
+ dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
break;
+ case regLiveIn:
+ llvm_unreachable("Should not have regLiveIn in map");
default: {
- dbgs() << '=' << printReg(PhysRegState[Reg]);
- LiveRegMap::iterator LRI = findLiveVirtReg(PhysRegState[Reg]);
- assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
- "Missing VirtReg entry");
- if (LRI->Dirty)
- dbgs() << "*";
- assert(LRI->PhysReg == Reg && "Bad inverse map");
+ dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
+ LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
+ assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
+ if (I->LiveOut || I->Reloaded) {
+ dbgs() << '[';
+ if (I->LiveOut) dbgs() << 'O';
+ if (I->Reloaded) dbgs() << 'R';
+ dbgs() << ']';
+ }
+ assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
break;
}
}
}
dbgs() << '\n';
// Check that LiveVirtRegs is the inverse.
- for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
- e = LiveVirtRegs.end(); i != e; ++i) {
- if (!i->PhysReg)
- continue;
- assert(i->VirtReg.isVirtual() && "Bad map key");
- assert(Register::isPhysicalRegister(i->PhysReg) && "Bad map value");
- assert(PhysRegState[i->PhysReg] == i->VirtReg && "Bad inverse map");
+ for (const LiveReg &LR : LiveVirtRegs) {
+ Register VirtReg = LR.VirtReg;
+ assert(VirtReg.isVirtual() && "Bad map key");
+ MCPhysReg PhysReg = LR.PhysReg;
+ if (PhysReg != 0) {
+ assert(Register::isPhysicalRegister(PhysReg) &&
+ "mapped to physreg");
+ for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
+ assert(RegUnitStates[*UI] == VirtReg && "inverse map valid");
+ }
+ }
}
}
#endif
-void RegAllocFast::allocateInstruction(MachineInstr &MI) {
- const MCInstrDesc &MCID = MI.getDesc();
-
- // If this is a copy, we may be able to coalesce.
- Register CopySrcReg;
- Register CopyDstReg;
- unsigned CopySrcSub = 0;
- unsigned CopyDstSub = 0;
- if (MI.isCopy()) {
- CopyDstReg = MI.getOperand(0).getReg();
- CopySrcReg = MI.getOperand(1).getReg();
- CopyDstSub = MI.getOperand(0).getSubReg();
- CopySrcSub = MI.getOperand(1).getSubReg();
+/// Count number of defs consumed from each register class by \p Reg
+void RegAllocFast::addRegClassDefCounts(std::vector<unsigned> &RegClassDefCounts,
+ Register Reg) const {
+ assert(RegClassDefCounts.size() == TRI->getNumRegClasses());
+
+ if (Reg.isVirtual()) {
+ const TargetRegisterClass *OpRC = MRI->getRegClass(Reg);
+ for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
+ RCIdx != RCIdxEnd; ++RCIdx) {
+ const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
+ // FIXME: Consider aliasing sub/super registers.
+ if (OpRC->hasSubClassEq(IdxRC))
+ ++RegClassDefCounts[RCIdx];
+ }
+
+ return;
}
- // Track registers used by instruction.
- UsedInInstr.clear();
+ for (unsigned RCIdx = 0, RCIdxEnd = TRI->getNumRegClasses();
+ RCIdx != RCIdxEnd; ++RCIdx) {
+ const TargetRegisterClass *IdxRC = TRI->getRegClass(RCIdx);
+ for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
+ if (IdxRC->contains(*Alias)) {
+ ++RegClassDefCounts[RCIdx];
+ break;
+ }
+ }
+ }
+}
- // First scan.
- // Mark physreg uses and early clobbers as used.
- // Find the end of the virtreg operands
- unsigned VirtOpEnd = 0;
- bool hasTiedOps = false;
- bool hasEarlyClobbers = false;
- bool hasPartialRedefs = false;
- bool hasPhysDefs = false;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- // Make sure MRI knows about registers clobbered by regmasks.
- if (MO.isRegMask()) {
- MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
- continue;
+void RegAllocFast::allocateInstruction(MachineInstr &MI) {
+ // The basic algorithm here is:
+ // 1. Mark registers of def operands as free
+ // 2. Allocate registers to use operands and place reload instructions for
+ // registers displaced by the allocation.
+ //
+ // However we need to handle some corner cases:
+ // - pre-assigned defs and uses need to be handled before the other def/use
+ // operands are processed to avoid the allocation heuristics clashing with
+ // the pre-assignment.
+ // - The "free def operands" step has to come last instead of first for tied
+ // operands and early-clobbers.
+
+ UsedInInstr.clear();
+ BundleVirtRegsMap.clear();
+
+ // Scan for special cases; Apply pre-assigned register defs to state.
+ bool HasPhysRegUse = false;
+ bool HasRegMask = false;
+ bool HasVRegDef = false;
+ bool HasDef = false;
+ bool HasEarlyClobber = false;
+ bool NeedToAssignLiveThroughs = false;
+ for (MachineOperand &MO : MI.operands()) {
+ if (MO.isReg()) {
+ Register Reg = MO.getReg();
+ if (Reg.isVirtual()) {
+ if (MO.isDef()) {
+ HasDef = true;
+ HasVRegDef = true;
+ if (MO.isEarlyClobber()) {
+ HasEarlyClobber = true;
+ NeedToAssignLiveThroughs = true;
+ }
+ if (MO.isTied() || (MO.getSubReg() != 0 && !MO.isUndef()))
+ NeedToAssignLiveThroughs = true;
+ }
+ } else if (Reg.isPhysical()) {
+ if (!MRI->isReserved(Reg)) {
+ if (MO.isDef()) {
+ HasDef = true;
+ bool displacedAny = definePhysReg(MI, Reg);
+ if (MO.isEarlyClobber())
+ HasEarlyClobber = true;
+ if (!displacedAny)
+ MO.setIsDead(true);
+ }
+ if (MO.readsReg())
+ HasPhysRegUse = true;
+ }
+ }
+ } else if (MO.isRegMask()) {
+ HasRegMask = true;
}
- if (!MO.isReg()) continue;
- Register Reg = MO.getReg();
- if (!Reg) continue;
- if (Register::isVirtualRegister(Reg)) {
- VirtOpEnd = i+1;
- if (MO.isUse()) {
- hasTiedOps = hasTiedOps ||
- MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
+ }
+
+ // Allocate virtreg defs.
+ if (HasDef) {
+ if (HasVRegDef) {
+ // Special handling for early clobbers, tied operands or subregister defs:
+ // Compared to "normal" defs these:
+ // - Must not use a register that is pre-assigned for a use operand.
+ // - In order to solve tricky inline assembly constraints we change the
+ // heuristic to figure out a good operand order before doing
+ // assignments.
+ if (NeedToAssignLiveThroughs) {
+ DefOperandIndexes.clear();
+ PhysRegUses.clear();
+
+ // Track number of defs which may consume a register from the class.
+ std::vector<unsigned> RegClassDefCounts(TRI->getNumRegClasses(), 0);
+ assert(RegClassDefCounts[0] == 0);
+
+ LLVM_DEBUG(dbgs() << "Need to assign livethroughs\n");
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
+ const MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (MO.readsReg()) {
+ if (Reg.isPhysical()) {
+ LLVM_DEBUG(dbgs() << "mark extra used: " << printReg(Reg, TRI)
+ << '\n');
+ markPhysRegUsedInInstr(Reg);
+ }
+ }
+
+ if (MO.isDef()) {
+ if (Reg.isVirtual())
+ DefOperandIndexes.push_back(I);
+
+ addRegClassDefCounts(RegClassDefCounts, Reg);
+ }
+ }
+
+ llvm::sort(DefOperandIndexes, [&](uint16_t I0, uint16_t I1) {
+ const MachineOperand &MO0 = MI.getOperand(I0);
+ const MachineOperand &MO1 = MI.getOperand(I1);
+ Register Reg0 = MO0.getReg();
+ Register Reg1 = MO1.getReg();
+ const TargetRegisterClass &RC0 = *MRI->getRegClass(Reg0);
+ const TargetRegisterClass &RC1 = *MRI->getRegClass(Reg1);
+
+ // Identify regclasses that are easy to use up completely just in this
+ // instruction.
+ unsigned ClassSize0 = RegClassInfo.getOrder(&RC0).size();
+ unsigned ClassSize1 = RegClassInfo.getOrder(&RC1).size();
+
+ bool SmallClass0 = ClassSize0 < RegClassDefCounts[RC0.getID()];
+ bool SmallClass1 = ClassSize1 < RegClassDefCounts[RC1.getID()];
+ if (SmallClass0 > SmallClass1)
+ return true;
+ if (SmallClass0 < SmallClass1)
+ return false;
+
+ // Allocate early clobbers and livethrough operands first.
+ bool Livethrough0 = MO0.isEarlyClobber() || MO0.isTied() ||
+ (MO0.getSubReg() == 0 && !MO0.isUndef());
+ bool Livethrough1 = MO1.isEarlyClobber() || MO1.isTied() ||
+ (MO1.getSubReg() == 0 && !MO1.isUndef());
+ if (Livethrough0 > Livethrough1)
+ return true;
+ if (Livethrough0 < Livethrough1)
+ return false;
+
+ // Tie-break rule: operand index.
+ return I0 < I1;
+ });
+
+ for (uint16_t OpIdx : DefOperandIndexes) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ LLVM_DEBUG(dbgs() << "Allocating " << MO << '\n');
+ unsigned Reg = MO.getReg();
+ if (MO.isEarlyClobber() || MO.isTied() ||
+ (MO.getSubReg() && !MO.isUndef())) {
+ defineLiveThroughVirtReg(MI, OpIdx, Reg);
+ } else {
+ defineVirtReg(MI, OpIdx, Reg);
+ }
+ }
} else {
- if (MO.isEarlyClobber())
- hasEarlyClobbers = true;
- if (MO.getSubReg() && MI.readsVirtualRegister(Reg))
- hasPartialRedefs = true;
+ // Assign virtual register defs.
+ for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ Register Reg = MO.getReg();
+ if (Reg.isVirtual())
+ defineVirtReg(MI, I, Reg);
+ }
}
- continue;
}
- if (!MRI->isAllocatable(Reg)) continue;
- if (MO.isUse()) {
- usePhysReg(MO);
- } else if (MO.isEarlyClobber()) {
- definePhysReg(MI, Reg,
- (MO.isImplicit() || MO.isDead()) ? regFree : regReserved);
- hasEarlyClobbers = true;
- } else
- hasPhysDefs = true;
+
+ // Free registers occupied by defs.
+ // Iterate operands in reverse order, so we see the implicit super register
+ // defs first (we added them earlier in case of <def,read-undef>).
+ for (unsigned I = MI.getNumOperands(); I-- > 0;) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+
+ // subreg defs don't free the full register. We left the subreg number
+ // around as a marker in setPhysReg() to recognize this case here.
+ if (MO.getSubReg() != 0) {
+ MO.setSubReg(0);
+ continue;
+ }
+
+ // Do not free tied operands and early clobbers.
+ if (MO.isTied() || MO.isEarlyClobber())
+ continue;
+ Register Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ assert(Reg.isPhysical());
+ if (MRI->isReserved(Reg))
+ continue;
+ freePhysReg(Reg);
+ unmarkRegUsedInInstr(Reg);
+ }
}
- // The instruction may have virtual register operands that must be allocated
- // the same register at use-time and def-time: early clobbers and tied
- // operands. If there are also physical defs, these registers must avoid
- // both physical defs and uses, making them more constrained than normal
- // operands.
- // Similarly, if there are multiple defs and tied operands, we must make
- // sure the same register is allocated to uses and defs.
- // We didn't detect inline asm tied operands above, so just make this extra
- // pass for all inline asm.
- if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
- (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
- handleThroughOperands(MI, VirtDead);
- // Don't attempt coalescing when we have funny stuff going on.
- CopyDstReg = Register();
- // Pretend we have early clobbers so the use operands get marked below.
- // This is not necessary for the common case of a single tied use.
- hasEarlyClobbers = true;
+ // Displace clobbered registers.
+ if (HasRegMask) {
+ for (const MachineOperand &MO : MI.operands()) {
+ if (MO.isRegMask()) {
+ // MRI bookkeeping.
+ MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
+
+ // Displace clobbered registers.
+ const uint32_t *Mask = MO.getRegMask();
+ for (LiveRegMap::iterator LRI = LiveVirtRegs.begin(),
+ LRIE = LiveVirtRegs.end(); LRI != LRIE; ++LRI) {
+ MCPhysReg PhysReg = LRI->PhysReg;
+ if (PhysReg != 0 && MachineOperand::clobbersPhysReg(Mask, PhysReg))
+ displacePhysReg(MI, PhysReg);
+ }
+ }
+ }
}
- // Second scan.
- // Allocate virtreg uses.
+ // Apply pre-assigned register uses to state.
+ if (HasPhysRegUse) {
+ for (MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.readsReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (!Reg.isPhysical())
+ continue;
+ if (MRI->isReserved(Reg))
+ continue;
+ bool displacedAny = usePhysReg(MI, Reg);
+ if (!displacedAny && !MRI->isReserved(Reg))
+ MO.setIsKill(true);
+ }
+ }
+
+ // Allocate virtreg uses and insert reloads as necessary.
bool HasUndefUse = false;
- for (unsigned I = 0; I != VirtOpEnd; ++I) {
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg()) continue;
+ if (!MO.isReg() || !MO.isUse())
+ continue;
Register Reg = MO.getReg();
if (!Reg.isVirtual())
continue;
- if (MO.isUse()) {
- if (MO.isUndef()) {
- HasUndefUse = true;
- // There is no need to allocate a register for an undef use.
- continue;
- }
- // Populate MayLiveAcrossBlocks in case the use block is allocated before
- // the def block (removing the vreg uses).
- mayLiveIn(Reg);
-
- LiveReg &LR = reloadVirtReg(MI, I, Reg, CopyDstReg);
- MCPhysReg PhysReg = LR.PhysReg;
- CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0;
- if (setPhysReg(MI, MO, PhysReg))
- killVirtReg(LR);
+ if (MO.isUndef()) {
+ HasUndefUse = true;
+ continue;
}
+
+ // Populate MayLiveAcrossBlocks in case the use block is allocated before
+ // the def block (removing the vreg uses).
+ mayLiveIn(Reg);
+
+ assert(!MO.isInternalRead() && "Bundles not supported");
+ assert(MO.readsReg() && "reading use");
+ useVirtReg(MI, I, Reg);
}
// Allocate undef operands. This is a separate step because in a situation
@@ -1133,76 +1321,40 @@ void RegAllocFast::allocateInstruction(MachineInstr &MI) {
}
}
- // Track registers defined by instruction - early clobbers and tied uses at
- // this point.
- UsedInInstr.clear();
- if (hasEarlyClobbers) {
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg()) continue;
- Register Reg = MO.getReg();
- if (!Reg || !Reg.isPhysical())
+ // Free early clobbers.
+ if (HasEarlyClobber) {
+ for (unsigned I = MI.getNumOperands(); I-- > 0; ) {
+ MachineOperand &MO = MI.getOperand(I);
+ if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber())
continue;
- // Look for physreg defs and tied uses.
- if (!MO.isDef() && !MO.isTied()) continue;
- markRegUsedInInstr(Reg);
- }
- }
-
- unsigned DefOpEnd = MI.getNumOperands();
- if (MI.isCall()) {
- // Spill all virtregs before a call. This serves one purpose: If an
- // exception is thrown, the landing pad is going to expect to find
- // registers in their spill slots.
- // Note: although this is appealing to just consider all definitions
- // as call-clobbered, this is not correct because some of those
- // definitions may be used later on and we do not want to reuse
- // those for virtual registers in between.
- LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
- spillAll(MI, /*OnlyLiveOut*/ false);
- }
-
- // Third scan.
- // Mark all physreg defs as used before allocating virtreg defs.
- for (unsigned I = 0; I != DefOpEnd; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
- continue;
- Register Reg = MO.getReg();
-
- if (!Reg || !Reg.isPhysical() || !MRI->isAllocatable(Reg))
- continue;
- definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
- }
+ // subreg defs don't free the full register. We left the subreg number
+ // around as a marker in setPhysReg() to recognize this case here.
+ if (MO.getSubReg() != 0) {
+ MO.setSubReg(0);
+ continue;
+ }
- // Fourth scan.
- // Allocate defs and collect dead defs.
- for (unsigned I = 0; I != DefOpEnd; ++I) {
- const MachineOperand &MO = MI.getOperand(I);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
- continue;
- Register Reg = MO.getReg();
+ Register Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ assert(Reg.isPhysical() && "should have register assigned");
+
+ // We sometimes get odd situations like:
+ // early-clobber %x0 = INSTRUCTION %x0
+ // which is semantically questionable as the early-clobber should
+ // apply before the use. But in practice we consider the use to
+ // happen before the early clobber now. Don't free the early clobber
+ // register in this case.
+ if (MI.readsRegister(Reg, TRI))
+ continue;
- // We have already dealt with phys regs in the previous scan.
- if (Reg.isPhysical())
- continue;
- MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, CopySrcReg);
- if (setPhysReg(MI, MI.getOperand(I), PhysReg)) {
- VirtDead.push_back(Reg);
- CopyDstReg = Register(); // cancel coalescing;
- } else
- CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0;
+ freePhysReg(Reg);
+ }
}
- // Kill dead defs after the scan to ensure that multiple defs of the same
- // register are allocated identically. We didn't need to do this for uses
- // because we are creating our own kill flags, and they are always at the
- // last use.
- for (Register VirtReg : VirtDead)
- killVirtReg(VirtReg);
- VirtDead.clear();
-
LLVM_DEBUG(dbgs() << "<< " << MI);
- if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
+ if (MI.isCopy() && MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
+ MI.getNumOperands() == 2) {
LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
Coalesced.push_back(&MI);
}
@@ -1219,23 +1371,22 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
if (!Register::isVirtualRegister(Reg))
return;
+ // Already spilled to a stackslot?
+ int SS = StackSlotForVirtReg[Reg];
+ if (SS != -1) {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ updateDbgValueForSpill(MI, SS);
+ LLVM_DEBUG(dbgs() << "Rewrite DBG_VALUE for spilled memory: " << MI);
+ return;
+ }
+
// See if this virtual register has already been allocated to a physical
// register or spilled to a stack slot.
LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
setPhysReg(MI, MO, LRI->PhysReg);
} else {
- int SS = StackSlotForVirtReg[Reg];
- if (SS != -1) {
- // Modify DBG_VALUE now that the value is in a spill slot.
- updateDbgValueForSpill(MI, SS);
- LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << MI);
- return;
- }
-
- // We can't allocate a physreg for a DebugValue, sorry!
- LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
- MO.setReg(Register());
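+ // No register has been assigned to the vreg yet: we allocate bottom-up and
+ // its last real use is above this DBG_VALUE. Remember it so it can be
+ // fixed up later, or dropped at the end of the block if the vreg never
+ // gets a register.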
+ DanglingDbgValues[Reg].push_back(&MI);
}
// If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
@@ -1243,25 +1394,46 @@ void RegAllocFast::handleDebugValue(MachineInstr &MI) {
LiveDbgValueMap[Reg].push_back(&MI);
}
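+/// Apply the physical register assignments recorded for the BUNDLE header to
+/// the virtual register operands of the instructions inside the bundle.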
+void RegAllocFast::handleBundle(MachineInstr &MI) {
+ MachineBasicBlock::instr_iterator BundledMI = MI.getIterator();
+ ++BundledMI;
+ while (BundledMI->isBundledWithPred()) {
+ for (unsigned I = 0; I < BundledMI->getNumOperands(); ++I) {
+ MachineOperand &MO = BundledMI->getOperand(I);
+ if (!MO.isReg())
+ continue;
+
+ Register Reg = MO.getReg();
+ if (!Reg.isVirtual())
+ continue;
+
+ DenseMap<Register, MCPhysReg>::iterator DI;
+ DI = BundleVirtRegsMap.find(Reg);
+ assert(DI != BundleVirtRegsMap.end() && "Unassigned virtual register");
+
+ setPhysReg(MI, MO, DI->second);
+ }
+
+ ++BundledMI;
+ }
+}
+
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
this->MBB = &MBB;
LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);
- PhysRegState.assign(TRI->getNumRegs(), regDisabled);
+ RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
- MachineBasicBlock::iterator MII = MBB.begin();
-
- // Add live-in registers as live.
- for (const MachineBasicBlock::RegisterMaskPair &LI : MBB.liveins())
- if (MRI->isAllocatable(LI.PhysReg))
- definePhysReg(MII, LI.PhysReg, regReserved);
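+ // Registers live into any successor are live out of this block. With
+ // bottom-up allocation, mark them pre-assigned so they are not handed out
+ // to virtual registers at the bottom of the block.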
+ for (MachineBasicBlock *Succ : MBB.successors()) {
+ for (const MachineBasicBlock::RegisterMaskPair &LI : Succ->liveins())
+ setPhysRegState(LI.PhysReg, regPreAssigned);
+ }
- VirtDead.clear();
Coalesced.clear();
- // Otherwise, sequentially allocate each instruction in the MBB.
- for (MachineInstr &MI : MBB) {
+ // Traverse block in reverse order allocating instructions one by one.
+ for (MachineInstr &MI : reverse(MBB)) {
LLVM_DEBUG(
dbgs() << "\n>> " << MI << "Regs:";
dumpState()
@@ -1275,11 +1447,22 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
}
allocateInstruction(MI);
+
+ // Once the BUNDLE header has been assigned registers, the same assignments
+ // need to be applied to the bundled MIs.
+ if (MI.getOpcode() == TargetOpcode::BUNDLE) {
+ handleBundle(MI);
+ }
}
+ LLVM_DEBUG(
+ dbgs() << "Begin Regs:";
+ dumpState()
+ );
+
// Spill all physical registers holding virtual registers now.
- LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n");
- spillAll(MBB.getFirstTerminator(), /*OnlyLiveOut*/ true);
+ LLVM_DEBUG(dbgs() << "Loading live registers at begin of block.\n");
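+ // Allocation ran bottom-up, so any virtual register still live here is
+ // live into the block and must be reloaded from its stack slot at the
+ // block entry.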
+ reloadAtBegin(MBB);
// Erase all the coalesced copies. We are delaying it until now because
// LiveVirtRegs might refer to the instrs.
@@ -1287,6 +1470,20 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
MBB.erase(MI);
NumCoalesced += Coalesced.size();
+ for (auto &UDBGPair : DanglingDbgValues) {
+ for (MachineInstr *DbgValue : UDBGPair.second) {
+ assert(DbgValue->isDebugValue() && "expected DBG_VALUE");
+ MachineOperand &MO = DbgValue->getOperand(0);
+ // Nothing to do if the vreg was spilled in the meantime.
+ if (!MO.isReg())
+ continue;
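+ // The vreg was never assigned while allocating the block; point the
+ // DBG_VALUE at $noreg so the value is reported as unavailable rather than
+ // at a stale location.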
+ LLVM_DEBUG(dbgs() << "Register did not survive for " << *DbgValue
+ << '\n');
+ MO.setReg(0);
+ }
+ }
+ DanglingDbgValues.clear();
+
LLVM_DEBUG(MBB.dump());
}
@@ -1300,8 +1497,11 @@ bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
MFI = &MF.getFrameInfo();
MRI->freezeReservedRegs(MF);
RegClassInfo.runOnMachineFunction(MF);
+ unsigned NumRegUnits = TRI->getNumRegUnits();
UsedInInstr.clear();
- UsedInInstr.setUniverse(TRI->getNumRegUnits());
+ UsedInInstr.setUniverse(NumRegUnits);
+ PhysRegUses.clear();
+ PhysRegUses.setUniverse(NumRegUnits);
// initialize the virtual->physical register map to have a 'null'
// mapping for all virtual registers