Diffstat (limited to 'include/llvm/Target/TargetInstrInfo.h')
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h | 281
1 file changed, 159 insertions, 122 deletions
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 0cebcf1c6b5d..e0b9a22ed5d0 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -21,6 +21,7 @@
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
namespace llvm {
@@ -45,7 +46,6 @@ class DFAPacketizer;
template<class T> class SmallVectorImpl;
-
//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
@@ -55,10 +55,11 @@ class TargetInstrInfo : public MCInstrInfo {
void operator=(const TargetInstrInfo &) = delete;
public:
TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
- unsigned CatchRetOpcode = ~0u)
+ unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
: CallFrameSetupOpcode(CFSetupOpcode),
CallFrameDestroyOpcode(CFDestroyOpcode),
- CatchRetOpcode(CatchRetOpcode) {}
+ CatchRetOpcode(CatchRetOpcode),
+ ReturnOpcode(ReturnOpcode) {}
virtual ~TargetInstrInfo();
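The new ReturnOpcode parameter lets a backend expose its return instruction the same way it already exposes the call-frame and catchret opcodes. A minimal sketch of a target InstrInfo constructor threading the opcode through (the XYZ target and all XYZ:: opcode names are hypothetical placeholders):

    // Hypothetical backend; every XYZ:: name here is a placeholder.
    XYZInstrInfo::XYZInstrInfo()
        : TargetInstrInfo(XYZ::ADJCALLSTACKDOWN, // CFSetupOpcode
                          XYZ::ADJCALLSTACKUP,   // CFDestroyOpcode
                          XYZ::CATCHRET,         // CatchRetOpcode
                          XYZ::RET) {}           // ReturnOpcode (new here)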
@@ -78,10 +79,10 @@ public:
/// This means the only allowed uses are constants and unallocatable physical
/// registers so that the instruction's result is independent of the place
/// in the function.
- bool isTriviallyReMaterializable(const MachineInstr *MI,
+ bool isTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA = nullptr) const {
- return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
- (MI->getDesc().isRematerializable() &&
+ return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+ (MI.getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
isReallyTriviallyReMaterializableGeneric(MI, AA)));
}
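With the parameter now a reference, callers that hold a MachineInstr* dereference at the call site instead of passing the pointer. A sketch of the updated idiom, assuming TII, AA, and a Candidates vector are in scope:

    // Collect trivially rematerializable defs in a block (illustrative only).
    for (MachineInstr &MI : MBB)
      if (TII->isTriviallyReMaterializable(MI, AA))
        Candidates.push_back(&MI);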
@@ -94,7 +95,7 @@ protected:
/// than producing a value, or if it requires any address registers that are
/// not always available.
/// Requirements must be checked as stated in isTriviallyReMaterializable().
- virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+ virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
return false;
}
@@ -114,8 +115,7 @@ protected:
/// Do not call this method for a non-commutable instruction.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
- virtual MachineInstr *commuteInstructionImpl(MachineInstr *MI,
- bool NewMI,
+ virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const;
@@ -139,7 +139,7 @@ private:
/// set and the target hook isReallyTriviallyReMaterializable returns false,
/// this function does target-independent tests to determine if the
/// instruction is really trivially rematerializable.
- bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
+ bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
AliasAnalysis *AA) const;
public:
@@ -152,12 +152,13 @@ public:
unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
+ unsigned getReturnOpcode() const { return ReturnOpcode; }
/// Returns the actual stack pointer adjustment made by an instruction
/// as part of a call sequence. By default, only call frame setup/destroy
/// instructions adjust the stack, but targets may want to override this
/// to enable more fine-grained adjustment, or adjust by a different value.
- virtual int getSPAdjust(const MachineInstr *MI) const;
+ virtual int getSPAdjust(const MachineInstr &MI) const;
/// Return true if the instruction is a "coalescable" extension instruction.
/// That is, it's like a copy where it's legal for the source to overlap the
@@ -175,14 +176,14 @@ public:
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
- virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@@ -193,7 +194,7 @@ public:
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instruction that loads from the stack. This is just a hint, as some
/// cases may be missed.
- virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
+ virtual bool hasLoadFromStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
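These queries return the transferred register (0 meaning "not a simple load") and report the slot through the FrameIndex out-parameter. A sketch of a typical use with the new reference-taking signature, assuming TII and MI are in scope:

    int FI;
    if (unsigned DestReg = TII->isLoadFromStackSlot(MI, FI))
      dbgs() << "reload of reg " << DestReg << " from FI#" << FI << '\n';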
@@ -202,14 +203,14 @@ public:
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
- virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
return 0;
}
@@ -220,14 +221,14 @@ public:
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instruction that stores to the
/// stack. This is just a hint, as some cases may be missed.
- virtual bool hasStoreToStackSlot(const MachineInstr *MI,
+ virtual bool hasStoreToStackSlot(const MachineInstr &MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
/// Provide the identity of the two frame indices.
- virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
int &SrcFrameIndex) const {
return false;
}
@@ -253,8 +254,20 @@ public:
///
/// Targets need to override this for their different architectures, and
/// behavior can be tuned further for specific micro-architectures.
- virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
- return MI->isAsCheapAsAMove();
+ virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
+ return MI.isAsCheapAsAMove();
+ }
+
+ /// Return true if the instruction should be sunk by MachineSink.
+ ///
+ /// MachineSink determines on its own whether the instruction is safe to sink;
+ /// this gives the target a hook to override the default behavior with regard
+ /// to which instructions should be sunk.
+ /// The default behavior is to not sink insert_subreg, subreg_to_reg, and
+ /// reg_sequence. These are meant to be close to the source to make it easier
+ /// to coalesce.
+ virtual bool shouldSink(const MachineInstr &MI) const {
+ return !MI.isInsertSubreg() && !MI.isSubregToReg() && !MI.isRegSequence();
}
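A target that wants more instructions kept near their defs can narrow the default. A hypothetical override (the XYZ names are placeholders, not a real backend):

    bool XYZInstrInfo::shouldSink(const MachineInstr &MI) const {
      // Also keep our address-formation pseudo next to its users.
      if (MI.getOpcode() == XYZ::ADDR_PSEUDO)
        return false;
      return TargetInstrInfo::shouldSink(MI); // default subreg-op filtering
    }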
/// Re-issue the specified 'original' instruction at the
@@ -263,9 +276,8 @@ public:
/// DestReg:SubIdx. Any existing subreg index is preserved or composed with
/// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
+ MachineBasicBlock::iterator MI, unsigned DestReg,
+ unsigned SubIdx, const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const;
/// Create a duplicate of the Orig instruction in MF. This is like
@@ -273,7 +285,7 @@ public:
/// that are required to be unique.
///
/// The instruction must be duplicable as indicated by isNotDuplicable().
- virtual MachineInstr *duplicate(MachineInstr *Orig,
+ virtual MachineInstr *duplicate(MachineInstr &Orig,
MachineFunction &MF) const;
/// This method must be implemented by targets that
@@ -286,9 +298,9 @@ public:
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the last new instruction.
///
- virtual MachineInstr *
- convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
+ virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineInstr &MI,
+ LiveVariables *LV) const {
return nullptr;
}
@@ -315,8 +327,7 @@ public:
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *
- commuteInstruction(MachineInstr *MI,
- bool NewMI = false,
+ commuteInstruction(MachineInstr &MI, bool NewMI = false,
unsigned OpIdx1 = CommuteAnyOperandIndex,
unsigned OpIdx2 = CommuteAnyOperandIndex) const;
@@ -337,7 +348,7 @@ public:
/// findCommutedOpIndices(MI, Op1, Op2);
/// can be interpreted as a query asking to find an operand that would be
/// commutable with operand #1.
- virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ virtual bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// A pair composed of a register and a sub-register index.
@@ -424,8 +435,8 @@ public:
/// are deemed identical except for defs. If this function is called when the
/// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
/// aggressive checks.
- virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1,
+ virtual bool produceSameValue(const MachineInstr &MI0,
+ const MachineInstr &MI1,
const MachineRegisterInfo *MRI = nullptr) const;
/// Analyze the branching code at the end of MBB, returning
@@ -453,7 +464,9 @@ public:
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+ /// before calling this function.
+ virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const {
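The renamed hook keeps its contract: it returns false when the terminators were understood, populating TBB, FBB, and Cond. A sketch of the usual call pattern, assuming TII and MBB are in scope:

    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    SmallVector<MachineOperand, 4> Cond;
    if (!TII->analyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
      // Branching understood: TBB/FBB/Cond now describe how MBB exits.
    }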
@@ -499,7 +512,7 @@ public:
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
- virtual bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify = false) const {
return true;
@@ -522,10 +535,13 @@ public:
/// cases where AnalyzeBranch doesn't apply because there was no original
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
+ ///
+ /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
+ /// before calling this function.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
- DebugLoc DL) const {
+ const DebugLoc &DL) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
}
@@ -672,7 +688,7 @@ public:
/// @param TrueReg Virtual register to copy when Cond is true.
/// @param FalseReg Virtual register to copy when Cond is false.
virtual void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I, DebugLoc DL,
+ MachineBasicBlock::iterator I, const DebugLoc &DL,
unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
@@ -696,11 +712,11 @@ public:
/// @param FalseOp Operand number of the value selected when Cond is false.
/// @param Optimizable Returned as true if MI is optimizable.
/// @returns False on success.
- virtual bool analyzeSelect(const MachineInstr *MI,
+ virtual bool analyzeSelect(const MachineInstr &MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const {
- assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
+ assert(MI.getDesc().isSelect() && "MI must be a select instruction");
return true;
}
@@ -719,7 +735,7 @@ public:
/// MI. Has to be updated with any newly created MI or deleted ones.
/// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
/// @returns Optimized instruction or NULL.
- virtual MachineInstr *optimizeSelect(MachineInstr *MI,
+ virtual MachineInstr *optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &NewMIs,
bool PreferFalse = false) const {
// This function must be implemented if Optimizable is ever set.
@@ -735,7 +751,7 @@ public:
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
+ MachineBasicBlock::iterator MI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
@@ -772,9 +788,7 @@ public:
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
- virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
- return false;
- }
+ virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }
/// Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
@@ -782,14 +796,15 @@ public:
/// operand folded, otherwise NULL is returned.
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
- MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
- ArrayRef<unsigned> Ops, int FrameIndex) const;
+ MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+ int FrameIndex,
+ LiveIntervals *LIS = nullptr) const;
/// Same as the previous version except it allows folding of any load and
/// store from / to any address, not just from a specific stack slot.
- MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
- ArrayRef<unsigned> Ops,
- MachineInstr *LoadMI) const;
+ MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineInstr &LoadMI,
+ LiveIntervals *LIS = nullptr) const;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
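Passes that maintain LiveIntervals can now thread them through so the target can repair them while folding; LIS simply stays nullptr for passes that do not. A sketch, assuming MI, FI, and LIS are in scope and operand 0 is the one to fold:

    // Try to fold a reload of operand 0 of MI from frame index FI.
    if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, {0}, FI, LIS)) {
      (void)FoldedMI;       // the new instruction was inserted before MI
      MI.eraseFromParent(); // the client removes the old instruction
    }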
@@ -802,6 +817,11 @@ public:
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
+ /// Return true when a code sequence can improve throughput. It
+ /// should be called only for instructions in loops.
+ /// \param Pattern - combiner pattern
+ virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;
+
/// Return true if the input \p Inst is part of a chain of dependent ops
/// that are suitable for reassociation, otherwise return false.
/// If the instruction's operands must be commuted to have a previous
@@ -850,8 +870,7 @@ public:
virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {
- return;
- };
+ }
/// Return true when a target supports MachineCombiner.
virtual bool useMachineCombiner() const { return false; }
@@ -862,9 +881,11 @@ protected:
/// take care of adding a MachineMemOperand to the newly created instruction.
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
- virtual MachineInstr *foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+ virtual MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
+ ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex,
+ LiveIntervals *LIS = nullptr) const {
return nullptr;
}
@@ -874,8 +895,9 @@ protected:
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
- MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
- MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+ MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+ LiveIntervals *LIS = nullptr) const {
return nullptr;
}
@@ -926,9 +948,10 @@ public:
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
- virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
- unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const{
+ virtual bool
+ unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
+ bool UnfoldLoad, bool UnfoldStore,
+ SmallVectorImpl<MachineInstr *> &NewMIs) const {
return false;
}
@@ -974,24 +997,26 @@ public:
/// Get the base register and byte offset of an instruction that reads/writes
/// memory.
- virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
- unsigned &Offset,
+ virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
+ int64_t &Offset,
const TargetRegisterInfo *TRI) const {
return false;
}
virtual bool enableClusterLoads() const { return false; }
- virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
- MachineInstr *SecondLdSt,
- unsigned NumLoads) const {
+ virtual bool enableClusterStores() const { return false; }
+
+ virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt,
+ MachineInstr &SecondLdSt,
+ unsigned NumLoads) const {
return false;
}
/// Can this target fuse the given instructions if they are scheduled
/// adjacent.
- virtual bool shouldScheduleAdjacent(MachineInstr* First,
- MachineInstr *Second) const {
+ virtual bool shouldScheduleAdjacent(MachineInstr &First,
+ MachineInstr &Second) const {
return false;
}
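A hypothetical implementation of the widened hook, which now covers stores as well as loads (base and offset checks elided; XYZ is a placeholder):

    bool XYZInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                           MachineInstr &SecondLdSt,
                                           unsigned NumLoads) const {
      // Only form pairs; a real target would also check that both accesses
      // share a base register and have adjacent offsets.
      return NumLoads <= 2;
    }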
@@ -1012,19 +1037,18 @@ public:
/// Returns true if the instruction is already predicated.
- virtual bool isPredicated(const MachineInstr *MI) const {
+ virtual bool isPredicated(const MachineInstr &MI) const {
return false;
}
/// Returns true if the instruction is a
/// terminator instruction that has not been predicated.
- virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
+ virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
- virtual
- bool PredicateInstruction(MachineInstr *MI,
- ArrayRef<MachineOperand> Pred) const;
+ virtual bool PredicateInstruction(MachineInstr &MI,
+ ArrayRef<MachineOperand> Pred) const;
/// Returns true if the first specified predicate
/// subsumes the second, e.g. GE subsumes GT.
@@ -1037,7 +1061,7 @@ public:
/// If the specified instruction defines any predicate
/// or condition code register(s) used for predication, returns true as well
/// as the definition predicate(s) by reference.
- virtual bool DefinesPredicate(MachineInstr *MI,
+ virtual bool DefinesPredicate(MachineInstr &MI,
std::vector<MachineOperand> &Pred) const {
return false;
}
@@ -1045,8 +1069,8 @@ public:
/// Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
- virtual bool isPredicable(MachineInstr *MI) const {
- return MI->getDesc().isPredicable();
+ virtual bool isPredicable(MachineInstr &MI) const {
+ return MI.getDesc().isPredicable();
}
/// Return true if it's safe to move a machine
@@ -1057,7 +1081,7 @@ public:
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
- virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ virtual bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
@@ -1084,6 +1108,13 @@ public:
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG *DAG) const;
+ /// Allocate and return a hazard recognizer for use by non-scheduling
+ /// passes.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
+ return nullptr;
+ }
+
/// Provide a global flag for disabling the PreRA hazard recognizer that
/// targets may choose to honor.
bool usePreRAHazardRecognizer() const;
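The new MachineFunction-based overload exists so passes that are not schedulers, and therefore have no ScheduleDAG, can still query hazards. A hypothetical override; XYZHazardRecognizer is a placeholder type:

    ScheduleHazardRecognizer *XYZInstrInfo::CreateTargetPostRAHazardRecognizer(
        const MachineFunction &MF) const {
      return new XYZHazardRecognizer(MF.getSubtarget());
    }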
@@ -1092,22 +1123,20 @@ public:
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
- virtual bool analyzeCompare(const MachineInstr *MI,
- unsigned &SrcReg, unsigned &SrcReg2,
- int &Mask, int &Value) const {
+ virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask, int &Value) const {
return false;
}
/// See if the comparison instruction can be converted
/// into something more efficient. E.g., on ARM most instructions can set the
/// flags register, obviating the need for a separate CMP.
- virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
- unsigned SrcReg, unsigned SrcReg2,
- int Mask, int Value,
+ virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int Mask, int Value,
const MachineRegisterInfo *MRI) const {
return false;
}
- virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
+ virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }
/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load instruction if and only if the
@@ -1116,10 +1145,10 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
- virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
- const MachineRegisterInfo *MRI,
- unsigned &FoldAsLoadDefReg,
- MachineInstr *&DefMI) const {
+ virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const {
return nullptr;
}
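These hooks usually travel together in a peephole pass: analyzeCompare decomposes the compare, then optimizeCompareInstr tries to fold it into a flag-setting def. A sketch, assuming CmpMI and MRI are in scope (Mask and Value come back 0 for forms the target cannot describe):

    unsigned SrcReg = 0, SrcReg2 = 0;
    int Mask = 0, Value = 0;
    if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value) &&
        TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI)) {
      // The compare was folded away into a flag-setting instruction.
    }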
@@ -1129,7 +1158,7 @@ public:
/// then the caller may assume that DefMI has been erased from its parent
/// block. The caller may assume that it will not be erased by this
/// function otherwise.
- virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
@@ -1139,7 +1168,7 @@ public:
/// IssueWidth is the number of microops that can be dispatched each
/// cycle. An instruction with zero microops takes no dispatch resources.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
- const MachineInstr *MI) const;
+ const MachineInstr &MI) const;
/// Return true for pseudo instructions that don't consume any
/// machine resources in their current form. These are common cases that the
@@ -1162,35 +1191,44 @@ public:
/// by a target. Use computeOperandLatency to get the best estimate of
/// latency.
virtual int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
unsigned UseIdx) const;
- /// Compute and return the latency of the given data
- /// dependent def and use when the operand indices are already known.
+ /// Compute and return the latency of the given data dependent def and use
+ /// when the operand indices are already known. UseMI may be \c nullptr for
+ /// an unknown use.
+ ///
+ /// FindMin may be set to get the minimum vs. expected latency. Minimum
+ /// latency is used for scheduling groups, while expected latency is for
+ /// instruction cost and critical path.
+ ///
+ /// Depending on the subtarget's itinerary properties, this may or may not
+ /// need to call getOperandLatency(). For most subtargets, we don't need
+ /// DefIdx or UseIdx to compute min latency.
unsigned computeOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx)
- const;
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const;
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
- const MachineInstr *MI,
+ const MachineInstr &MI,
unsigned *PredCost = nullptr) const;
- virtual unsigned getPredicationCost(const MachineInstr *MI) const;
+ virtual unsigned getPredicationCost(const MachineInstr &MI) const;
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
- /// Return the default expected latency for a def based on it's opcode.
+ /// Return the default expected latency for a def based on its opcode.
unsigned defaultDefLatency(const MCSchedModel &SchedModel,
- const MachineInstr *DefMI) const;
+ const MachineInstr &DefMI) const;
int computeDefOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI) const;
+ const MachineInstr &DefMI) const;
/// Return true if this opcode has high latency to its result.
virtual bool isHighLatencyDef(int opc) const { return false; }
@@ -1200,23 +1238,23 @@ public:
/// it 'high'. This is used by optimization passes such as machine LICM to
/// determine whether it makes sense to hoist an instruction out even in a
/// high register pressure situation.
- virtual
- bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
- const MachineRegisterInfo *MRI,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const {
+ virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
return false;
}
/// Compute operand latency of a def of 'Reg'. Return true
/// if the target considers it 'low'.
- virtual
- bool hasLowDefLatency(const TargetSchedModel &SchedModel,
- const MachineInstr *DefMI, unsigned DefIdx) const;
+ virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+ const MachineInstr &DefMI,
+ unsigned DefIdx) const;
/// Perform target-specific instruction verification.
- virtual
- bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
+ virtual bool verifyInstruction(const MachineInstr &MI,
+ StringRef &ErrInfo) const {
return true;
}
@@ -1240,7 +1278,7 @@ public:
/// execution domain.
///
virtual std::pair<uint16_t, uint16_t>
- getExecutionDomain(const MachineInstr *MI) const {
+ getExecutionDomain(const MachineInstr &MI) const {
return std::make_pair(0, 0);
}
@@ -1248,8 +1286,7 @@ public:
///
/// The bit (1 << Domain) must be set in the mask returned from
/// getExecutionDomain(MI).
- virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
-
+ virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
/// Returns the preferred minimum clearance
/// before an instruction with an unwanted partial register update.
@@ -1291,7 +1328,7 @@ public:
/// allows the target to insert a dependency breaking instruction.
///
virtual unsigned
- getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no partial register dependency.
return 0;
@@ -1311,7 +1348,7 @@ public:
/// This hook works similarly to getPartialRegUpdateClearance, except that it
/// does not take an operand index. Instead sets \p OpNum to the index of the
/// unused register.
- virtual unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no undef register dependency.
return 0;
@@ -1334,9 +1371,8 @@ public:
/// An <imp-kill> operand should be added to MI if an instruction was
/// inserted. This ties the instructions together in the post-ra scheduler.
///
- virtual void
- breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {}
+ virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {}
/// Create machine specific model for scheduling.
virtual DFAPacketizer *
@@ -1349,11 +1385,11 @@ public:
// memory addresses. This function returns true if two MIs access different
// memory addresses and false otherwise.
virtual bool
- areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
+ areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
AliasAnalysis *AA = nullptr) const {
- assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
+ assert((MIa.mayLoad() || MIa.mayStore()) &&
"MIa must load from or modify a memory location");
- assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
+ assert((MIb.mayLoad() || MIb.mayStore()) &&
"MIb must load from or modify a memory location");
return false;
}
@@ -1406,6 +1442,7 @@ public:
private:
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
unsigned CatchRetOpcode;
+ unsigned ReturnOpcode;
};
/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
@@ -1435,6 +1472,6 @@ struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_TARGET_TARGETINSTRINFO_H
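For out-of-tree targets, the mechanical part of this patch is updating override signatures from MachineInstr* to MachineInstr& and dropping the dereferences, for example (hypothetical XYZ backend):

    // Before this patch:
    //   bool XYZInstrInfo::isPredicated(const MachineInstr *MI) const {
    //     return MI->findFirstPredOperandIdx() != -1;
    //   }
    // After:
    bool XYZInstrInfo::isPredicated(const MachineInstr &MI) const {
      return MI.findFirstPredOperandIdx() != -1;
    }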