aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp')
-rw-r--r-- contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp | 802
1 files changed, 556 insertions, 246 deletions
diff --git a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 4e8dcc52fc20..0a6985a3eac2 100644
--- a/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/contrib/llvm-project/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -187,17 +187,21 @@ class InstructionMatcher;
static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
MVT VT(SVT);
- if (VT.isVector() && VT.getVectorNumElements() != 1)
+ if (VT.isScalableVector())
+ return None;
+
+ if (VT.isFixedLengthVector() && VT.getVectorNumElements() != 1)
return LLTCodeGen(
LLT::vector(VT.getVectorNumElements(), VT.getScalarSizeInBits()));
if (VT.isInteger() || VT.isFloatingPoint())
return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+
return None;
}
static std::string explainPredicates(const TreePatternNode *N) {
- std::string Explanation = "";
+ std::string Explanation;
StringRef Separator = "";
for (const TreePredicateCall &Call : N->getPredicateCalls()) {
const TreePredicateFn &P = Call.Fn;
@@ -301,8 +305,8 @@ static Error failedImport(const Twine &Reason) {
}
static Error isTrivialOperatorNode(const TreePatternNode *N) {
- std::string Explanation = "";
- std::string Separator = "";
+ std::string Explanation;
+ std::string Separator;
bool HasUnsupportedPredicate = false;
for (const TreePredicateCall &Call : N->getPredicateCalls()) {
@@ -389,6 +393,10 @@ getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
return Name;
}
+static std::string getScopedName(unsigned Scope, const std::string &Name) {
+ return ("pred:" + Twine(Scope) + ":" + Name).str();
+}
+
//===- MatchTable Helpers -------------------------------------------------===//
class MatchTable;
@@ -445,9 +453,8 @@ public:
MatchTableRecord(Optional<unsigned> LabelID_, StringRef EmitStr,
unsigned NumElements, unsigned Flags,
int64_t RawValue = std::numeric_limits<int64_t>::min())
- : LabelID(LabelID_.hasValue() ? LabelID_.getValue() : ~0u),
- EmitStr(EmitStr), NumElements(NumElements), Flags(Flags),
- RawValue(RawValue) {
+ : LabelID(LabelID_.getValueOr(~0u)), EmitStr(EmitStr),
+ NumElements(NumElements), Flags(Flags), RawValue(RawValue) {
assert((!LabelID_.hasValue() || LabelID != ~0u) &&
"This value is reserved for non-labels");
}
@@ -852,6 +859,11 @@ protected:
DefinedComplexPatternSubOperandMap;
/// A map of Symbolic Names to ComplexPattern sub-operands.
DefinedComplexPatternSubOperandMap ComplexSubOperands;
+  /// A map used for the multiple-reference error check on ComplexSubOperands.
+  /// A ComplexSubOperand can't be referenced multiple times from different
+  /// operands; however, multiple references from the same operand are allowed
+  /// since that is how 'same operand checks' are generated.
+ StringMap<std::string> ComplexSubOperandsParentName;
uint64_t RuleID;
static uint64_t NextRuleID;
@@ -917,14 +929,25 @@ public:
void definePhysRegOperand(Record *Reg, OperandMatcher &OM);
Error defineComplexSubOperand(StringRef SymbolicName, Record *ComplexPattern,
- unsigned RendererID, unsigned SubOperandID) {
- if (ComplexSubOperands.count(SymbolicName))
- return failedImport(
- "Complex suboperand referenced more than once (Operand: " +
- SymbolicName + ")");
+ unsigned RendererID, unsigned SubOperandID,
+ StringRef ParentSymbolicName) {
+ std::string ParentName(ParentSymbolicName);
+ if (ComplexSubOperands.count(SymbolicName)) {
+ const std::string &RecordedParentName =
+ ComplexSubOperandsParentName[SymbolicName];
+ if (RecordedParentName != ParentName)
+ return failedImport("Error: Complex suboperand " + SymbolicName +
+ " referenced by different operands: " +
+ RecordedParentName + " and " + ParentName + ".");
+      // A complex suboperand referenced more than once from the same operand is
+ // used to generate 'same operand check'. Emitting of
+ // GIR_ComplexSubOperandRenderer for them is already handled.
+ return Error::success();
+ }
ComplexSubOperands[SymbolicName] =
std::make_tuple(ComplexPattern, RendererID, SubOperandID);
+ ComplexSubOperandsParentName[SymbolicName] = ParentName;
return Error::success();
}
@@ -992,10 +1015,6 @@ protected:
bool Optimized = false;
public:
- /// Construct a new predicate and add it to the matcher.
- template <class Kind, class... Args>
- Optional<Kind *> addPredicate(Args &&... args);
-
typename PredicatesTy::iterator predicates_begin() {
return Predicates.begin();
}
@@ -1089,6 +1108,7 @@ public:
IPM_MemoryVsLLTSize,
IPM_MemoryAddressSpace,
IPM_MemoryAlignment,
+ IPM_VectorSplatImm,
IPM_GenericPredicate,
OPM_SameOperand,
OPM_ComplexPattern,
@@ -1101,6 +1121,7 @@ public:
OPM_PointerToAny,
OPM_RegBank,
OPM_MBB,
+ OPM_RecordNamedOperand,
};
protected:
@@ -1270,10 +1291,15 @@ public:
: OperandPredicateMatcher(OPM_PointerToAny, InsnVarID, OpIdx),
SizeInBits(SizeInBits) {}
- static bool classof(const OperandPredicateMatcher *P) {
+ static bool classof(const PredicateMatcher *P) {
return P->getKind() == OPM_PointerToAny;
}
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return OperandPredicateMatcher::isIdentical(B) &&
+ SizeInBits == cast<PointerToAnyOperandMatcher>(&B)->SizeInBits;
+ }
+
void emitPredicateOpcodes(MatchTable &Table,
RuleMatcher &Rule) const override {
Table << MatchTable::Opcode("GIM_CheckPointerToAny")
@@ -1284,6 +1310,40 @@ public:
}
};
+/// Generates code to record named operand in RecordedOperands list at StoreIdx.
+/// Predicates with 'let PredicateCodeUsesOperands = 1' get RecordedOperands as
+/// an argument to predicate's c++ code once all operands have been matched.
+class RecordNamedOperandMatcher : public OperandPredicateMatcher {
+protected:
+ unsigned StoreIdx;
+ std::string Name;
+
+public:
+ RecordNamedOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+ unsigned StoreIdx, StringRef Name)
+ : OperandPredicateMatcher(OPM_RecordNamedOperand, InsnVarID, OpIdx),
+ StoreIdx(StoreIdx), Name(Name) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == OPM_RecordNamedOperand;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return OperandPredicateMatcher::isIdentical(B) &&
+ StoreIdx == cast<RecordNamedOperandMatcher>(&B)->StoreIdx &&
+ Name == cast<RecordNamedOperandMatcher>(&B)->Name;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_RecordNamedOperand")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+ << MatchTable::Comment("StoreIdx") << MatchTable::IntValue(StoreIdx)
+ << MatchTable::Comment("Name : " + Name) << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check that an operand is a particular target constant.
class ComplexPatternOperandMatcher : public OperandPredicateMatcher {
protected:
@@ -1523,7 +1583,7 @@ public:
AllocatedTemporariesBaseID(AllocatedTemporariesBaseID) {}
bool hasSymbolicName() const { return !SymbolicName.empty(); }
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void setSymbolicName(StringRef Name) {
assert(SymbolicName.empty() && "Operand already has a symbolic name");
SymbolicName = std::string(Name);
@@ -1670,10 +1730,23 @@ PredicateListMatcher<PredicateMatcher>::getNoPredicateComment() const {
/// Generates code to check the opcode of an instruction.
class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
protected:
- const CodeGenInstruction *I;
+  // Allow matching one of several similar opcodes that share properties. This
+ // is to handle patterns where one SelectionDAG operation maps to multiple
+ // GlobalISel ones (e.g. G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC). The first
+ // is treated as the canonical opcode.
+ SmallVector<const CodeGenInstruction *, 2> Insts;
static DenseMap<const CodeGenInstruction *, unsigned> OpcodeValues;
+
+ MatchTableRecord getInstValue(const CodeGenInstruction *I) const {
+ const auto VI = OpcodeValues.find(I);
+ if (VI != OpcodeValues.end())
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
+ VI->second);
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
+ }
+
public:
static void initOpcodeValuesMap(const CodeGenTarget &Target) {
OpcodeValues.clear();
@@ -1683,8 +1756,13 @@ public:
OpcodeValues[I] = OpcodeValue++;
}
- InstructionOpcodeMatcher(unsigned InsnVarID, const CodeGenInstruction *I)
- : InstructionPredicateMatcher(IPM_Opcode, InsnVarID), I(I) {}
+ InstructionOpcodeMatcher(unsigned InsnVarID,
+ ArrayRef<const CodeGenInstruction *> I)
+ : InstructionPredicateMatcher(IPM_Opcode, InsnVarID),
+ Insts(I.begin(), I.end()) {
+ assert((Insts.size() == 1 || Insts.size() == 2) &&
+ "unexpected number of opcode alternatives");
+ }
static bool classof(const PredicateMatcher *P) {
return P->getKind() == IPM_Opcode;
@@ -1692,22 +1770,36 @@ public:
bool isIdentical(const PredicateMatcher &B) const override {
return InstructionPredicateMatcher::isIdentical(B) &&
- I == cast<InstructionOpcodeMatcher>(&B)->I;
+ Insts == cast<InstructionOpcodeMatcher>(&B)->Insts;
}
+
+ bool hasValue() const override {
+ return Insts.size() == 1 && OpcodeValues.count(Insts[0]);
+ }
+
+ // TODO: This is used for the SwitchMatcher optimization. We should be able to
+ // return a list of the opcodes to match.
MatchTableRecord getValue() const override {
+ assert(Insts.size() == 1);
+
+ const CodeGenInstruction *I = Insts[0];
const auto VI = OpcodeValues.find(I);
if (VI != OpcodeValues.end())
return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
VI->second);
return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
}
- bool hasValue() const override { return OpcodeValues.count(I); }
void emitPredicateOpcodes(MatchTable &Table,
RuleMatcher &Rule) const override {
- Table << MatchTable::Opcode("GIM_CheckOpcode") << MatchTable::Comment("MI")
- << MatchTable::IntValue(InsnVarID) << getValue()
- << MatchTable::LineBreak;
+ StringRef CheckType = Insts.size() == 1 ?
+ "GIM_CheckOpcode" : "GIM_CheckOpcodeIsEither";
+ Table << MatchTable::Opcode(CheckType) << MatchTable::Comment("MI")
+ << MatchTable::IntValue(InsnVarID);
+
+ for (const CodeGenInstruction *I : Insts)
+ Table << getInstValue(I);
+ Table << MatchTable::LineBreak;
}
/// Compare the priority of this object and B.
@@ -1725,20 +1817,32 @@ public:
// using instruction frequency information to improve compile time.
if (const InstructionOpcodeMatcher *BO =
dyn_cast<InstructionOpcodeMatcher>(&B))
- return I->TheDef->getName() < BO->I->TheDef->getName();
+ return Insts[0]->TheDef->getName() < BO->Insts[0]->TheDef->getName();
return false;
};
bool isConstantInstruction() const {
- return I->TheDef->getName() == "G_CONSTANT";
+ return Insts.size() == 1 && Insts[0]->TheDef->getName() == "G_CONSTANT";
}
- StringRef getOpcode() const { return I->TheDef->getName(); }
- bool isVariadicNumOperands() const { return I->Operands.isVariadic; }
+ // The first opcode is the canonical opcode, and later are alternatives.
+ StringRef getOpcode() const {
+ return Insts[0]->TheDef->getName();
+ }
+
+ ArrayRef<const CodeGenInstruction *> getAlternativeOpcodes() {
+ return Insts;
+ }
+
+ bool isVariadicNumOperands() const {
+ // If one is variadic, they all should be.
+ return Insts[0]->Operands.isVariadic;
+ }
StringRef getOperandType(unsigned OpIdx) const {
- return I->Operands[OpIdx].OperandType;
+ // Types expected to be uniform for all alternatives.
+ return Insts[0]->Operands[OpIdx].OperandType;
}
};
@@ -2021,6 +2125,42 @@ public:
}
};
+// Matcher for immAllOnesV/immAllZerosV
+class VectorSplatImmPredicateMatcher : public InstructionPredicateMatcher {
+public:
+ enum SplatKind {
+ AllZeros,
+ AllOnes
+ };
+
+private:
+ SplatKind Kind;
+
+public:
+ VectorSplatImmPredicateMatcher(unsigned InsnVarID, SplatKind K)
+ : InstructionPredicateMatcher(IPM_VectorSplatImm, InsnVarID), Kind(K) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_VectorSplatImm;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ Kind == static_cast<const VectorSplatImmPredicateMatcher &>(B).Kind;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ if (Kind == AllOnes)
+ Table << MatchTable::Opcode("GIM_CheckIsBuildVectorAllOnes");
+ else
+ Table << MatchTable::Opcode("GIM_CheckIsBuildVectorAllZeros");
+
+ Table << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID);
+ Table << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check an arbitrary C++ instruction predicate.
class GenericInstructionPredicateMatcher : public InstructionPredicateMatcher {
protected:
@@ -2077,8 +2217,9 @@ protected:
SmallVector<std::pair<Record *, unsigned>, 2> PhysRegInputs;
public:
- InstructionMatcher(RuleMatcher &Rule, StringRef SymbolicName)
- : Rule(Rule), SymbolicName(SymbolicName) {
+ InstructionMatcher(RuleMatcher &Rule, StringRef SymbolicName,
+ bool NumOpsCheck = true)
+ : Rule(Rule), NumOperandsCheck(NumOpsCheck), SymbolicName(SymbolicName) {
// We create a new instruction matcher.
// Get a new ID for that instruction.
InsnVarID = Rule.implicitlyDefineInsnVar(*this);
@@ -2108,10 +2249,10 @@ public:
}
OperandMatcher &getOperand(unsigned OpIdx) {
- auto I = std::find_if(Operands.begin(), Operands.end(),
- [&OpIdx](const std::unique_ptr<OperandMatcher> &X) {
- return X->getOpIdx() == OpIdx;
- });
+ auto I = llvm::find_if(Operands,
+ [&OpIdx](const std::unique_ptr<OperandMatcher> &X) {
+ return X->getOpIdx() == OpIdx;
+ });
if (I != Operands.end())
return **I;
llvm_unreachable("Failed to lookup operand");
@@ -2267,9 +2408,10 @@ protected:
public:
InstructionOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
- RuleMatcher &Rule, StringRef SymbolicName)
+ RuleMatcher &Rule, StringRef SymbolicName,
+ bool NumOpsCheck = true)
: OperandPredicateMatcher(OPM_Instruction, InsnVarID, OpIdx),
- InsnMatcher(new InstructionMatcher(Rule, SymbolicName)) {}
+ InsnMatcher(new InstructionMatcher(Rule, SymbolicName, NumOpsCheck)) {}
static bool classof(const PredicateMatcher *P) {
return P->getKind() == OPM_Instruction;
@@ -2395,7 +2537,7 @@ public:
return R->getKind() == OR_Copy;
}
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
const OperandMatcher &Operand = Rule.getOperandMatcher(SymbolicName);
@@ -2462,7 +2604,7 @@ public:
return R->getKind() == OR_CopyOrAddZeroReg;
}
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
const OperandMatcher &Operand = Rule.getOperandMatcher(SymbolicName);
@@ -2499,7 +2641,7 @@ public:
return R->getKind() == OR_CopyConstantAsImm;
}
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
InstructionMatcher &InsnMatcher = Rule.getInstructionMatcher(SymbolicName);
@@ -2530,7 +2672,7 @@ public:
return R->getKind() == OR_CopyFConstantAsFPImm;
}
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
InstructionMatcher &InsnMatcher = Rule.getInstructionMatcher(SymbolicName);
@@ -2564,7 +2706,7 @@ public:
return R->getKind() == OR_CopySubReg;
}
- const StringRef getSymbolicName() const { return SymbolicName; }
+ StringRef getSymbolicName() const { return SymbolicName; }
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
const OperandMatcher &Operand = Rule.getOperandMatcher(SymbolicName);
@@ -2587,12 +2729,13 @@ protected:
unsigned InsnID;
const Record *RegisterDef;
bool IsDef;
+ const CodeGenTarget &Target;
public:
- AddRegisterRenderer(unsigned InsnID, const Record *RegisterDef,
- bool IsDef = false)
+ AddRegisterRenderer(unsigned InsnID, const CodeGenTarget &Target,
+ const Record *RegisterDef, bool IsDef = false)
: OperandRenderer(OR_Register), InsnID(InsnID), RegisterDef(RegisterDef),
- IsDef(IsDef) {}
+ IsDef(IsDef), Target(Target) {}
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_Register;
@@ -2600,13 +2743,17 @@ public:
void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
Table << MatchTable::Opcode("GIR_AddRegister")
- << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
- << MatchTable::NamedValue(
- (RegisterDef->getValue("Namespace")
- ? RegisterDef->getValueAsString("Namespace")
- : ""),
- RegisterDef->getName())
- << MatchTable::Comment("AddRegisterRegFlags");
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+ if (RegisterDef->getName() != "zero_reg") {
+ Table << MatchTable::NamedValue(
+ (RegisterDef->getValue("Namespace")
+ ? RegisterDef->getValueAsString("Namespace")
+ : ""),
+ RegisterDef->getName());
+ } else {
+ Table << MatchTable::NamedValue(Target.getRegNamespace(), "NoRegister");
+ }
+ Table << MatchTable::Comment("AddRegisterRegFlags");
// TODO: This is encoded as a 64-bit element, but only 16 or 32-bits are
// really needed for a physical register reference. We can pack the
@@ -2628,12 +2775,14 @@ protected:
unsigned TempRegID;
const CodeGenSubRegIndex *SubRegIdx;
bool IsDef;
+ bool IsDead;
public:
TempRegRenderer(unsigned InsnID, unsigned TempRegID, bool IsDef = false,
- const CodeGenSubRegIndex *SubReg = nullptr)
+ const CodeGenSubRegIndex *SubReg = nullptr,
+ bool IsDead = false)
: OperandRenderer(OR_Register), InsnID(InsnID), TempRegID(TempRegID),
- SubRegIdx(SubReg), IsDef(IsDef) {}
+ SubRegIdx(SubReg), IsDef(IsDef), IsDead(IsDead) {}
static bool classof(const OperandRenderer *R) {
return R->getKind() == OR_TempRegister;
@@ -2650,9 +2799,13 @@ public:
<< MatchTable::Comment("TempRegID") << MatchTable::IntValue(TempRegID)
<< MatchTable::Comment("TempRegFlags");
- if (IsDef)
- Table << MatchTable::NamedValue("RegState::Define");
- else
+ if (IsDef) {
+ SmallString<32> RegFlags;
+ RegFlags += "RegState::Define";
+ if (IsDead)
+ RegFlags += "|RegState::Dead";
+ Table << MatchTable::NamedValue(RegFlags);
+ } else
Table << MatchTable::IntValue(0);
if (SubRegIdx)
@@ -3365,6 +3518,16 @@ private:
// Rule coverage information.
Optional<CodeGenCoverage> RuleCoverage;
+ /// Variables used to help with collecting of named operands for predicates
+ /// with 'let PredicateCodeUsesOperands = 1'. WaitingForNamedOperands is set
+ /// to the number of named operands that predicate expects. Store locations in
+ /// StoreIdxForName correspond to the order in which operand names appear in
+ /// predicate's argument list.
+  /// When we visit a named leaf operand and WaitingForNamedOperands is not zero,
+ /// add matcher that will record operand and decrease counter.
+ unsigned WaitingForNamedOperands = 0;
+ StringMap<unsigned> StoreIdxForName;
+
void gatherOpcodeValues();
void gatherTypeIDValues();
void gatherNodeEquivs();
@@ -3394,7 +3557,11 @@ private:
Expected<action_iterator>
createInstructionRenderer(action_iterator InsertPt, RuleMatcher &M,
const TreePatternNode *Dst);
- void importExplicitDefRenderers(BuildMIAction &DstMIBuilder);
+
+ Expected<action_iterator>
+ importExplicitDefRenderers(action_iterator InsertPt, RuleMatcher &M,
+ BuildMIAction &DstMIBuilder,
+ const TreePatternNode *Dst);
Expected<action_iterator>
importExplicitUseRenderers(action_iterator InsertPt, RuleMatcher &M,
@@ -3413,7 +3580,8 @@ private:
void emitCxxPredicateFns(raw_ostream &OS, StringRef CodeFieldName,
StringRef TypeIdentifier, StringRef ArgType,
- StringRef ArgName, StringRef AdditionalDeclarations,
+ StringRef ArgName, StringRef AdditionalArgs,
+ StringRef AdditionalDeclarations,
std::function<bool(const Record *R)> Filter);
void emitImmPredicateFns(raw_ostream &OS, StringRef TypeIdentifier,
StringRef ArgType,
@@ -3455,6 +3623,12 @@ private:
Optional<const CodeGenRegisterClass *>
inferRegClassFromPattern(TreePatternNode *N);
+ // Add builtin predicates.
+ Expected<InstructionMatcher &>
+ addBuiltinPredicates(const Record *SrcGIEquivOrNull,
+ const TreePredicateFn &Predicate,
+ InstructionMatcher &InsnMatcher, bool &HasAddedMatcher);
+
public:
/// Takes a sequence of \p Rules and group them based on the predicates
/// they share. \p MatcherStorage is used as a memory container
@@ -3562,6 +3736,147 @@ GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
return Error::success();
}
+Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(
+ const Record *SrcGIEquivOrNull, const TreePredicateFn &Predicate,
+ InstructionMatcher &InsnMatcher, bool &HasAddedMatcher) {
+ if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+ if (const ListInit *AddrSpaces = Predicate.getAddressSpaces()) {
+ SmallVector<unsigned, 4> ParsedAddrSpaces;
+
+ for (Init *Val : AddrSpaces->getValues()) {
+ IntInit *IntVal = dyn_cast<IntInit>(Val);
+ if (!IntVal)
+ return failedImport("Address space is not an integer");
+ ParsedAddrSpaces.push_back(IntVal->getValue());
+ }
+
+ if (!ParsedAddrSpaces.empty()) {
+ InsnMatcher.addPredicate<MemoryAddressSpacePredicateMatcher>(
+ 0, ParsedAddrSpaces);
+ }
+ }
+
+ int64_t MinAlign = Predicate.getMinAlignment();
+ if (MinAlign > 0)
+ InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
+ }
+
+ // G_LOAD is used for both non-extending and any-extending loads.
+ if (Predicate.isLoad() && Predicate.isNonExtLoad()) {
+ InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
+ 0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
+ return InsnMatcher;
+ }
+ if (Predicate.isLoad() && Predicate.isAnyExtLoad()) {
+ InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
+ 0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
+ return InsnMatcher;
+ }
+
+ if (Predicate.isStore()) {
+ if (Predicate.isTruncStore()) {
+ // FIXME: If MemoryVT is set, we end up with 2 checks for the MMO size.
+ InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
+ 0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
+ return InsnMatcher;
+ }
+ if (Predicate.isNonTruncStore()) {
+ // We need to check the sizes match here otherwise we could incorrectly
+ // match truncating stores with non-truncating ones.
+ InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
+ 0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
+ }
+ }
+
+ // No check required. We already did it by swapping the opcode.
+ if (!SrcGIEquivOrNull->isValueUnset("IfSignExtend") &&
+ Predicate.isSignExtLoad())
+ return InsnMatcher;
+
+ // No check required. We already did it by swapping the opcode.
+ if (!SrcGIEquivOrNull->isValueUnset("IfZeroExtend") &&
+ Predicate.isZeroExtLoad())
+ return InsnMatcher;
+
+ // No check required. G_STORE by itself is a non-extending store.
+ if (Predicate.isNonTruncStore())
+ return InsnMatcher;
+
+ if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+ if (Predicate.getMemoryVT() != nullptr) {
+ Optional<LLTCodeGen> MemTyOrNone =
+ MVTToLLT(getValueType(Predicate.getMemoryVT()));
+
+ if (!MemTyOrNone)
+ return failedImport("MemVT could not be converted to LLT");
+
+      // MMOs work in bytes, so we must take care that unusual types like i1
+      // are not rounded down to zero bytes.
+ unsigned MemSizeInBits =
+ llvm::alignTo(MemTyOrNone->get().getSizeInBits(), 8);
+
+ InsnMatcher.addPredicate<MemorySizePredicateMatcher>(0,
+ MemSizeInBits / 8);
+ return InsnMatcher;
+ }
+ }
+
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ // No check required. A G_LOAD/G_STORE is an unindexed load.
+ if (Predicate.isUnindexed())
+ return InsnMatcher;
+ }
+
+ if (Predicate.isAtomic()) {
+ if (Predicate.isAtomicOrderingMonotonic()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Monotonic");
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingAcquire()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingAcquireRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "AcquireRelease");
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "SequentiallyConsistent");
+ return InsnMatcher;
+ }
+ }
+
+ if (Predicate.isAtomicOrderingAcquireOrStronger()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Acquire", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingWeakerThanAcquire()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Acquire", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
+ return InsnMatcher;
+ }
+
+ if (Predicate.isAtomicOrderingReleaseOrStronger()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Release", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
+ return InsnMatcher;
+ }
+ if (Predicate.isAtomicOrderingWeakerThanRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Release", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
+ return InsnMatcher;
+ }
+ HasAddedMatcher = false;
+ return InsnMatcher;
+}
+
Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
const TreePatternNode *Src, unsigned &TempOpIdx) {
@@ -3603,6 +3918,7 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
for (const TreePredicateCall &Call : Src->getPredicateCalls()) {
const TreePredicateFn &Predicate = Call.Fn;
+ bool HasAddedBuiltinMatcher = true;
if (Predicate.isAlwaysTrue())
continue;
@@ -3611,154 +3927,35 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
continue;
}
- // An address space check is needed in all contexts if there is one.
- if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
- if (const ListInit *AddrSpaces = Predicate.getAddressSpaces()) {
- SmallVector<unsigned, 4> ParsedAddrSpaces;
-
- for (Init *Val : AddrSpaces->getValues()) {
- IntInit *IntVal = dyn_cast<IntInit>(Val);
- if (!IntVal)
- return failedImport("Address space is not an integer");
- ParsedAddrSpaces.push_back(IntVal->getValue());
- }
-
- if (!ParsedAddrSpaces.empty()) {
- InsnMatcher.addPredicate<MemoryAddressSpacePredicateMatcher>(
- 0, ParsedAddrSpaces);
- }
- }
-
- int64_t MinAlign = Predicate.getMinAlignment();
- if (MinAlign > 0)
- InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
- }
-
- // G_LOAD is used for both non-extending and any-extending loads.
- if (Predicate.isLoad() && Predicate.isNonExtLoad()) {
- InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
- 0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
- continue;
- }
- if (Predicate.isLoad() && Predicate.isAnyExtLoad()) {
- InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
- 0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
- continue;
- }
-
- if (Predicate.isStore()) {
- if (Predicate.isTruncStore()) {
- // FIXME: If MemoryVT is set, we end up with 2 checks for the MMO size.
- InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
- 0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
- continue;
- }
- if (Predicate.isNonTruncStore()) {
- // We need to check the sizes match here otherwise we could incorrectly
- // match truncating stores with non-truncating ones.
- InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
- 0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
- }
- }
-
- // No check required. We already did it by swapping the opcode.
- if (!SrcGIEquivOrNull->isValueUnset("IfSignExtend") &&
- Predicate.isSignExtLoad())
- continue;
-
- // No check required. We already did it by swapping the opcode.
- if (!SrcGIEquivOrNull->isValueUnset("IfZeroExtend") &&
- Predicate.isZeroExtLoad())
- continue;
-
- // No check required. G_STORE by itself is a non-extending store.
- if (Predicate.isNonTruncStore())
- continue;
-
- if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
- if (Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
-
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
-
- // MMO's work in bytes so we must take care of unusual types like i1
- // don't round down.
- unsigned MemSizeInBits =
- llvm::alignTo(MemTyOrNone->get().getSizeInBits(), 8);
-
- InsnMatcher.addPredicate<MemorySizePredicateMatcher>(
- 0, MemSizeInBits / 8);
- continue;
- }
- }
-
- if (Predicate.isLoad() || Predicate.isStore()) {
- // No check required. A G_LOAD/G_STORE is an unindexed load.
- if (Predicate.isUnindexed())
- continue;
- }
-
- if (Predicate.isAtomic()) {
- if (Predicate.isAtomicOrderingMonotonic()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "Monotonic");
- continue;
- }
- if (Predicate.isAtomicOrderingAcquire()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
- continue;
- }
- if (Predicate.isAtomicOrderingRelease()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
- continue;
- }
- if (Predicate.isAtomicOrderingAcquireRelease()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "AcquireRelease");
- continue;
- }
- if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "SequentiallyConsistent");
- continue;
- }
-
- if (Predicate.isAtomicOrderingAcquireOrStronger()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "Acquire", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
- continue;
- }
- if (Predicate.isAtomicOrderingWeakerThanAcquire()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "Acquire", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
- continue;
- }
-
- if (Predicate.isAtomicOrderingReleaseOrStronger()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "Release", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
- continue;
- }
- if (Predicate.isAtomicOrderingWeakerThanRelease()) {
- InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
- "Release", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
- continue;
- }
- }
+ auto InsnMatcherOrError = addBuiltinPredicates(
+ SrcGIEquivOrNull, Predicate, InsnMatcher, HasAddedBuiltinMatcher);
+ if (auto Error = InsnMatcherOrError.takeError())
+ return std::move(Error);
if (Predicate.hasGISelPredicateCode()) {
+ if (Predicate.usesOperands()) {
+ assert(WaitingForNamedOperands == 0 &&
+ "previous predicate didn't find all operands or "
+ "nested predicate that uses operands");
+ TreePattern *TP = Predicate.getOrigPatFragRecord();
+ WaitingForNamedOperands = TP->getNumArgs();
+ for (unsigned i = 0; i < WaitingForNamedOperands; ++i)
+ StoreIdxForName[getScopedName(Call.Scope, TP->getArgName(i))] = i;
+ }
InsnMatcher.addPredicate<GenericInstructionPredicateMatcher>(Predicate);
continue;
}
-
- return failedImport("Src pattern child has predicate (" +
- explainPredicates(Src) + ")");
+ if (!HasAddedBuiltinMatcher) {
+ return failedImport("Src pattern child has predicate (" +
+ explainPredicates(Src) + ")");
+ }
}
+
+ bool IsAtomic = false;
if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
else if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsAtomic")) {
+ IsAtomic = true;
InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
"Unordered", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
}
@@ -3812,6 +4009,27 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
}
}
+ // Hack around an unfortunate mistake in how atomic store (and really
+ // atomicrmw in general) operands were ordered. An ISD::STORE used the
+ // <stored value>, <pointer> order. ISD::ATOMIC_STORE used the opposite,
+ // <pointer>, <stored value>. In GlobalISel there's just the one store
+ // opcode, so we need to swap the operands here to get the right type check.
+ if (IsAtomic && SrcGIOrNull->TheDef->getName() == "G_STORE") {
+ assert(NumChildren == 2 && "wrong operands for atomic store");
+
+ TreePatternNode *PtrChild = Src->getChild(0);
+ TreePatternNode *ValueChild = Src->getChild(1);
+
+ if (auto Error = importChildMatcher(Rule, InsnMatcher, PtrChild, true,
+ false, 1, TempOpIdx))
+ return std::move(Error);
+
+ if (auto Error = importChildMatcher(Rule, InsnMatcher, ValueChild, false,
+ false, 0, TempOpIdx))
+ return std::move(Error);
+ return InsnMatcher;
+ }
+
// Match the used operands (i.e. the children of the operator).
bool IsIntrinsic =
SrcGIOrNull->TheDef->getName() == "G_INTRINSIC" ||
@@ -3902,12 +4120,22 @@ Error GlobalISelEmitter::importChildMatcher(
bool OperandIsImmArg, unsigned OpIdx, unsigned &TempOpIdx) {
Record *PhysReg = nullptr;
- StringRef SrcChildName = getSrcChildName(SrcChild, PhysReg);
+ std::string SrcChildName = std::string(getSrcChildName(SrcChild, PhysReg));
+ if (!SrcChild->isLeaf() &&
+ SrcChild->getOperator()->isSubClassOf("ComplexPattern")) {
+ // The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
+ // "MY_PAT:op1:op2" and the ones with same "name" represent same operand.
+ std::string PatternName = std::string(SrcChild->getOperator()->getName());
+ for (unsigned i = 0; i < SrcChild->getNumChildren(); ++i) {
+ PatternName += ":";
+ PatternName += SrcChild->getChild(i)->getName();
+ }
+ SrcChildName = PatternName;
+ }
OperandMatcher &OM =
- PhysReg
- ? InsnMatcher.addPhysRegInput(PhysReg, OpIdx, TempOpIdx)
- : InsnMatcher.addOperand(OpIdx, std::string(SrcChildName), TempOpIdx);
+ PhysReg ? InsnMatcher.addPhysRegInput(PhysReg, OpIdx, TempOpIdx)
+ : InsnMatcher.addOperand(OpIdx, SrcChildName, TempOpIdx);
if (OM.isSameAsAnotherOperand())
return Error::success();
@@ -3954,9 +4182,9 @@ Error GlobalISelEmitter::importChildMatcher(
for (unsigned i = 0, e = SrcChild->getNumChildren(); i != e; ++i) {
auto *SubOperand = SrcChild->getChild(i);
if (!SubOperand->getName().empty()) {
- if (auto Error = Rule.defineComplexSubOperand(SubOperand->getName(),
- SrcChild->getOperator(),
- RendererID, i))
+ if (auto Error = Rule.defineComplexSubOperand(
+ SubOperand->getName(), SrcChild->getOperator(), RendererID, i,
+ SrcChildName))
return Error;
}
}
@@ -4002,6 +4230,13 @@ Error GlobalISelEmitter::importChildMatcher(
if (auto *ChildDefInit = dyn_cast<DefInit>(SrcChild->getLeafValue())) {
auto *ChildRec = ChildDefInit->getDef();
+ if (WaitingForNamedOperands) {
+ auto PA = SrcChild->getNamesAsPredicateArg().begin();
+ std::string Name = getScopedName(PA->getScope(), PA->getIdentifier());
+ OM.addPredicate<RecordNamedOperandMatcher>(StoreIdxForName[Name], Name);
+ --WaitingForNamedOperands;
+ }
+
// Check for register classes.
if (ChildRec->isSubClassOf("RegisterClass") ||
ChildRec->isSubClassOf("RegisterOperand")) {
@@ -4044,6 +4279,40 @@ Error GlobalISelEmitter::importChildMatcher(
if (ChildRec->getName() == "srcvalue")
return Error::success();
+ const bool ImmAllOnesV = ChildRec->getName() == "immAllOnesV";
+ if (ImmAllOnesV || ChildRec->getName() == "immAllZerosV") {
+ auto MaybeInsnOperand = OM.addPredicate<InstructionOperandMatcher>(
+ InsnMatcher.getRuleMatcher(), SrcChild->getName(), false);
+ InstructionOperandMatcher &InsnOperand = **MaybeInsnOperand;
+
+ ValueTypeByHwMode VTy = ChildTypes.front().getValueTypeByHwMode();
+
+ const CodeGenInstruction &BuildVector
+ = Target.getInstruction(RK.getDef("G_BUILD_VECTOR"));
+ const CodeGenInstruction &BuildVectorTrunc
+ = Target.getInstruction(RK.getDef("G_BUILD_VECTOR_TRUNC"));
+
+ // Treat G_BUILD_VECTOR as the canonical opcode, and G_BUILD_VECTOR_TRUNC
+ // as an alternative.
+ InsnOperand.getInsnMatcher().addPredicate<InstructionOpcodeMatcher>(
+ makeArrayRef({&BuildVector, &BuildVectorTrunc}));
+
+ // TODO: Handle both G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC. We could
+ // theoretically not emit any opcode check, but getOpcodeMatcher currently
+ // has to succeed.
+ OperandMatcher &OM =
+ InsnOperand.getInsnMatcher().addOperand(0, "", TempOpIdx);
+ if (auto Error =
+ OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
+ return failedImport(toString(std::move(Error)) +
+ " for result of Src pattern operator");
+
+ InsnOperand.getInsnMatcher().addPredicate<VectorSplatImmPredicateMatcher>(
+ ImmAllOnesV ? VectorSplatImmPredicateMatcher::AllOnes
+ : VectorSplatImmPredicateMatcher::AllZeros);
+ return Error::success();
+ }
+
return failedImport(
"Src pattern child def is an unsupported tablegen class");
}
@@ -4155,7 +4424,7 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderer(
return failedImport("Dst operand has an unsupported type");
if (ChildRec->isSubClassOf("Register")) {
- DstMIBuilder.addRenderer<AddRegisterRenderer>(ChildRec);
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(Target, ChildRec);
return InsertPt;
}
@@ -4215,12 +4484,15 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
&Target.getInstruction(RK.getDef("COPY")));
BuildMIAction &CopyToPhysRegMIBuilder =
*static_cast<BuildMIAction *>(InsertPt->get());
- CopyToPhysRegMIBuilder.addRenderer<AddRegisterRenderer>(PhysInput.first,
+ CopyToPhysRegMIBuilder.addRenderer<AddRegisterRenderer>(Target,
+ PhysInput.first,
true);
CopyToPhysRegMIBuilder.addRenderer<CopyPhysRegRenderer>(PhysInput.first);
}
- importExplicitDefRenderers(DstMIBuilder);
+ if (auto Error = importExplicitDefRenderers(InsertPt, M, DstMIBuilder, Dst)
+ .takeError())
+ return std::move(Error);
if (auto Error = importExplicitUseRenderers(InsertPt, M, DstMIBuilder, Dst)
.takeError())
@@ -4372,13 +4644,39 @@ Expected<action_iterator> GlobalISelEmitter::createInstructionRenderer(
DstI);
}
-void GlobalISelEmitter::importExplicitDefRenderers(
- BuildMIAction &DstMIBuilder) {
+Expected<action_iterator> GlobalISelEmitter::importExplicitDefRenderers(
+ action_iterator InsertPt, RuleMatcher &M, BuildMIAction &DstMIBuilder,
+ const TreePatternNode *Dst) {
const CodeGenInstruction *DstI = DstMIBuilder.getCGI();
- for (unsigned I = 0; I < DstI->Operands.NumDefs; ++I) {
- const CGIOperandList::OperandInfo &DstIOperand = DstI->Operands[I];
- DstMIBuilder.addRenderer<CopyRenderer>(DstIOperand.Name);
+ const unsigned NumDefs = DstI->Operands.NumDefs;
+ if (NumDefs == 0)
+ return InsertPt;
+
+ DstMIBuilder.addRenderer<CopyRenderer>(DstI->Operands[0].Name);
+
+ // Some instructions have multiple defs, but are missing a type entry
+ // (e.g. s_cc_out operands).
+ if (Dst->getExtTypes().size() < NumDefs)
+ return failedImport("unhandled discarded def");
+
+ // Patterns only handle a single result, so any result after the first is an
+ // implicitly dead def.
+ for (unsigned I = 1; I < NumDefs; ++I) {
+ const TypeSetByHwMode &ExtTy = Dst->getExtType(I);
+ if (!ExtTy.isMachineValueType())
+ return failedImport("unsupported typeset");
+
+ auto OpTy = MVTToLLT(ExtTy.getMachineValueType().SimpleTy);
+ if (!OpTy)
+ return failedImport("unsupported type");
+
+ unsigned TempRegID = M.allocateTempRegID();
+ InsertPt =
+ M.insertAction<MakeTempRegisterAction>(InsertPt, *OpTy, TempRegID);
+ DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID, true, nullptr, true);
}
+
+ return InsertPt;
}
Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
@@ -4392,6 +4690,8 @@ Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
// EXTRACT_SUBREG needs to use a subregister COPY.
if (Name == "EXTRACT_SUBREG") {
+ if (!Dst->getChild(1)->isLeaf())
+ return failedImport("EXTRACT_SUBREG child #1 is not a leaf");
DefInit *SubRegInit = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
if (!SubRegInit)
return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
@@ -4578,7 +4878,7 @@ Error GlobalISelEmitter::importDefaultOperandRenderers(
IDMIBuilder.addRenderer<TempRegRenderer>(TempRegID);
DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID);
} else {
- DstMIBuilder.addRenderer<AddRegisterRenderer>(Def);
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(Target, Def);
}
continue;
}
@@ -4653,6 +4953,17 @@ GlobalISelEmitter::inferRegClassFromPattern(TreePatternNode *N) {
return None;
return getRegClassFromLeaf(RCChild);
}
+ if (InstName == "INSERT_SUBREG") {
+ TreePatternNode *Child0 = N->getChild(0);
+ assert(Child0->getNumTypes() == 1 && "Unexpected number of types!");
+ const TypeSetByHwMode &VTy = Child0->getExtType(0);
+ return inferSuperRegisterClassForNode(VTy, Child0, N->getChild(2));
+ }
+ if (InstName == "EXTRACT_SUBREG") {
+ assert(N->getNumTypes() == 1 && "Unexpected number of types!");
+ const TypeSetByHwMode &VTy = N->getExtType(0);
+ return inferSuperRegisterClass(VTy, N->getChild(1));
+ }
// Handle destination record types that we can safely infer a register class
// from.
@@ -4689,7 +5000,8 @@ GlobalISelEmitter::inferSuperRegisterClass(const TypeSetByHwMode &Ty,
// Use the information we found above to find a minimal register class which
// supports the subregister and type we want.
auto RC =
- Target.getSuperRegForSubReg(Ty.getValueTypeByHwMode(), CGRegs, SubIdx);
+ Target.getSuperRegForSubReg(Ty.getValueTypeByHwMode(), CGRegs, SubIdx,
+ /* MustBeAllocatable */ true);
if (!RC)
return None;
return *RC;
@@ -4777,9 +5089,9 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
if (Dst->isLeaf()) {
Record *RCDef = getInitValueAsRegClass(Dst->getLeafValue());
-
- const CodeGenRegisterClass &RC = Target.getRegisterClass(RCDef);
if (RCDef) {
+ const CodeGenRegisterClass &RC = Target.getRegisterClass(RCDef);
+
// We need to replace the def and all its uses with the specified
// operand. However, we must also insert COPY's wherever needed.
// For now, emit a copy and let the register allocator clean up.
@@ -4814,8 +5126,8 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
auto &DstI = Target.getInstruction(DstOp);
StringRef DstIName = DstI.TheDef->getName();
- if (DstI.Operands.NumDefs != Src->getExtTypes().size())
- return failedImport("Src pattern results and dst MI defs are different (" +
+ if (DstI.Operands.NumDefs < Src->getExtTypes().size())
+ return failedImport("Src pattern result has more defs than dst MI (" +
to_string(Src->getExtTypes().size()) + " def(s) vs " +
to_string(DstI.Operands.NumDefs) + " def(s))");
@@ -5035,7 +5347,8 @@ Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
// trouble than it's worth.
void GlobalISelEmitter::emitCxxPredicateFns(
raw_ostream &OS, StringRef CodeFieldName, StringRef TypeIdentifier,
- StringRef ArgType, StringRef ArgName, StringRef AdditionalDeclarations,
+ StringRef ArgType, StringRef ArgName, StringRef AdditionalArgs,
+ StringRef AdditionalDeclarations,
std::function<bool(const Record *R)> Filter) {
std::vector<const Record *> MatchedRecords;
const auto &Defs = RK.getAllDerivedDefinitions("PatFrag");
@@ -5060,7 +5373,7 @@ void GlobalISelEmitter::emitCxxPredicateFns(
OS << "bool " << Target.getName() << "InstructionSelector::test" << ArgName
<< "Predicate_" << TypeIdentifier << "(unsigned PredicateID, " << ArgType << " "
- << ArgName << ") const {\n"
+ << ArgName << AdditionalArgs <<") const {\n"
<< AdditionalDeclarations;
if (!AdditionalDeclarations.empty())
OS << "\n";
@@ -5086,12 +5399,13 @@ void GlobalISelEmitter::emitImmPredicateFns(
raw_ostream &OS, StringRef TypeIdentifier, StringRef ArgType,
std::function<bool(const Record *R)> Filter) {
return emitCxxPredicateFns(OS, "ImmediateCode", TypeIdentifier, ArgType,
- "Imm", "", Filter);
+ "Imm", "", "", Filter);
}
void GlobalISelEmitter::emitMIPredicateFns(raw_ostream &OS) {
return emitCxxPredicateFns(
OS, "GISelPredicateCode", "MI", "const MachineInstr &", "MI",
+ ", const std::array<const MachineOperand *, 3> &Operands",
" const MachineFunction &MF = *MI.getParent()->getParent();\n"
" const MachineRegisterInfo &MRI = MF.getRegInfo();\n"
" (void)MRI;",
@@ -5117,8 +5431,7 @@ std::vector<Matcher *> GlobalISelEmitter::optimizeRules(
// added rules out of it and make sure to re-create the group to properly
// re-initialize it:
if (CurrentGroup->size() < 2)
- for (Matcher *M : CurrentGroup->matchers())
- OptRules.push_back(M);
+ append_range(OptRules, CurrentGroup->matchers());
else {
CurrentGroup->finalize();
OptRules.push_back(CurrentGroup.get());
@@ -5167,15 +5480,13 @@ GlobalISelEmitter::buildMatchTable(MutableArrayRef<RuleMatcher> Rules,
OpcodeOrder[Opcode] = CurrentOrdering++;
}
- std::stable_sort(InputRules.begin(), InputRules.end(),
- [&OpcodeOrder](const Matcher *A, const Matcher *B) {
- auto *L = static_cast<const RuleMatcher *>(A);
- auto *R = static_cast<const RuleMatcher *>(B);
- return std::make_tuple(OpcodeOrder[L->getOpcode()],
- L->getNumOperands()) <
- std::make_tuple(OpcodeOrder[R->getOpcode()],
- R->getNumOperands());
- });
+ llvm::stable_sort(InputRules, [&OpcodeOrder](const Matcher *A,
+ const Matcher *B) {
+ auto *L = static_cast<const RuleMatcher *>(A);
+ auto *R = static_cast<const RuleMatcher *>(B);
+ return std::make_tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
+ std::make_tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
+ });
for (Matcher *Rule : InputRules)
Rule->optimize();
@@ -5308,7 +5619,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
<< " typedef void(" << Target.getName()
<< "InstructionSelector::*CustomRendererFn)(MachineInstrBuilder &, const "
- "MachineInstr&, int) "
+ "MachineInstr &, int) "
"const;\n"
<< " const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, "
"CustomRendererFn> "
@@ -5324,7 +5635,8 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
<< " bool testImmPredicate_APFloat(unsigned PredicateID, const APFloat "
"&Imm) const override;\n"
<< " const int64_t *getMatchTable() const override;\n"
- << " bool testMIPredicate_MI(unsigned PredicateID, const MachineInstr &MI) "
+ << " bool testMIPredicate_MI(unsigned PredicateID, const MachineInstr &MI"
+ ", const std::array<const MachineOperand *, 3> &Operands) "
"const override;\n"
<< "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";
@@ -5360,7 +5672,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
OS << "void " << Target.getName() << "InstructionSelector"
"::setupGeneratedPerFunctionState(MachineFunction &MF) {\n"
" AvailableFunctionFeatures = computeAvailableFunctionFeatures("
- "(const " << Target.getName() << "Subtarget*)&MF.getSubtarget(), &MF);\n"
+ "(const " << Target.getName() << "Subtarget *)&MF.getSubtarget(), &MF);\n"
"}\n";
if (Target.getName() == "X86" || Target.getName() == "AArch64") {
@@ -5378,8 +5690,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
// Emit a table containing the LLT objects needed by the matcher and an enum
// for the matcher to reference them with.
std::vector<LLTCodeGen> TypeObjects;
- for (const auto &Ty : KnownTypes)
- TypeObjects.push_back(Ty);
+ append_range(TypeObjects, KnownTypes);
llvm::sort(TypeObjects);
OS << "// LLT Objects.\n"
<< "enum {\n";
@@ -5481,7 +5792,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
<< "enum {\n"
<< " GICR_Invalid,\n";
for (const auto &Record : CustomRendererFns)
- OS << " GICR_" << Record->getValueAsString("RendererFn") << ", \n";
+ OS << " GICR_" << Record->getValueAsString("RendererFn") << ",\n";
OS << "};\n";
OS << Target.getName() << "InstructionSelector::CustomRendererFn\n"
@@ -5774,11 +6085,10 @@ void SwitchMatcher::finalize() {
if (empty())
return;
- std::stable_sort(Matchers.begin(), Matchers.end(),
- [](const Matcher *L, const Matcher *R) {
- return L->getFirstCondition().getValue() <
- R->getFirstCondition().getValue();
- });
+ llvm::stable_sort(Matchers, [](const Matcher *L, const Matcher *R) {
+ return L->getFirstCondition().getValue() <
+ R->getFirstCondition().getValue();
+ });
Condition = Matchers[0]->popFirstCondition();
for (unsigned I = 1, E = Values.size(); I < E; ++I)
Matchers[I]->popFirstCondition();