Diffstat (limited to 'llvm/include/llvm/CodeGen/TargetLowering.h')
-rw-r--r-- | llvm/include/llvm/CodeGen/TargetLowering.h | 237
1 file changed, 170 insertions(+), 67 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index c3221aac8eea..692dc4d7d4cf 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -40,7 +40,6 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
-#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
@@ -49,6 +48,7 @@
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/InstructionCost.h"
 #include "llvm/Support/MachineValueType.h"
 #include <algorithm>
 #include <cassert>
@@ -71,6 +71,7 @@ class FunctionLoweringInfo;
 class GlobalValue;
 class GISelKnownBits;
 class IntrinsicInst;
+class IRBuilderBase;
 struct KnownBits;
 class LegacyDivergenceAnalysis;
 class LLVMContext;
@@ -93,14 +94,16 @@ class Value;
 
 namespace Sched {
 
-  enum Preference {
-    None,        // No preference
-    Source,      // Follow source order.
-    RegPressure, // Scheduling for lowest register pressure.
-    Hybrid,      // Scheduling for both latency and register pressure.
-    ILP,         // Scheduling for ILP in low register pressure mode.
-    VLIW         // Scheduling for VLIW targets.
-  };
+enum Preference {
+  None,        // No preference
+  Source,      // Follow source order.
+  RegPressure, // Scheduling for lowest register pressure.
+  Hybrid,      // Scheduling for both latency and register pressure.
+  ILP,         // Scheduling for ILP in low register pressure mode.
+  VLIW,        // Scheduling for VLIW targets.
+  Fast,        // Fast suboptimal list scheduling
+  Linearize    // Linearize DAG, no scheduling
+};
 
 } // end namespace Sched
 
@@ -283,17 +286,17 @@ public:
     bool IsPreallocated : 1;
     bool IsReturned : 1;
     bool IsSwiftSelf : 1;
+    bool IsSwiftAsync : 1;
     bool IsSwiftError : 1;
     bool IsCFGuardTarget : 1;
     MaybeAlign Alignment = None;
-    Type *ByValType = nullptr;
-    Type *PreallocatedType = nullptr;
+    Type *IndirectType = nullptr;
 
     ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
-         IsSwiftError(false), IsCFGuardTarget(false) {}
+         IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}
 
     void setAttributes(const CallBase *Call, unsigned ArgIdx);
   };
@@ -346,7 +349,7 @@ public:
   /// Return the in-memory pointer type for the given address space, defaults to
   /// the pointer type from the data layout. FIXME: The default needs to be
   /// removed once all the code is updated.
-  MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
+  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
     return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
   }
 
@@ -389,6 +392,12 @@ public:
     return getPointerTy(DL);
   }
 
+  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
+  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
+  /// and must be at least as large as i32. The EVL is implicitly zero-extended
+  /// to any larger type.
+  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }
+
   /// This callback is used to inspect load/store instructions and add
   /// target-specific MachineMemOperand flags to them. The default
   /// implementation does nothing.
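
The getVPExplicitVectorLengthTy() hook added above is typically a one-line override. As a hedged sketch (MyTargetLowering is a hypothetical class, not part of this patch), a target whose vector-predication instructions take a 64-bit EVL operand could return MVT::i64, subject to the constraints stated in the new comment:

    // Sketch only: "MyTargetLowering" is hypothetical; real targets derive
    // from TargetLowering in their own <Target>ISelLowering.h.
    #include "llvm/CodeGen/TargetLowering.h"

    namespace llvm {
    class MyTargetLowering : public TargetLowering {
    public:
      explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {}

      // Per the contract above: a legal scalar integer type, at least as
      // large as i32. Narrower EVL values are implicitly zero-extended.
      MVT getVPExplicitVectorLengthTy() const override { return MVT::i64; }
    };
    } // namespace llvm
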
@@ -528,10 +537,6 @@ public:
     return false;
   }
 
-  /// If a branch or a select condition is skewed in one direction by more than
-  /// this factor, it is very likely to be predicted correctly.
-  virtual BranchProbability getPredictableBranchThreshold() const;
-
   /// Return true if the following transform is beneficial:
   /// fold (conv (load x)) -> (load (conv*)x)
   /// On architectures that don't natively support some vector loads
@@ -615,6 +620,10 @@ public:
   /// with instruction generated for signed comparison.
   virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
 
+  /// Return true if the heuristic to prefer icmp eq zero should be used in
+  /// code gen prepare.
+  virtual bool preferZeroCompareBranch() const { return false; }
+
   /// Return true if it is safe to transform an integer-domain bitwise operation
   /// into the equivalent floating-point operation. This should be set to true
   /// if the target has IEEE-754-compliant fabs/fneg operations for the input
@@ -785,7 +794,7 @@ public:
     return false;
   }
 
-  /// Return true if target always beneficiates from combining into FMA for a
+  /// Return true if target always benefits from combining into FMA for a
   /// given value type. This must typically return false on targets where FMA
   /// takes more cycles to execute than FADD.
   virtual bool enableAggressiveFMAFusion(EVT VT) const {
@@ -891,7 +900,7 @@ public:
   class ValueTypeActionImpl {
     /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
     /// that indicates how instruction selection should deal with the type.
-    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
+    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];
 
   public:
     ValueTypeActionImpl() {
@@ -1148,7 +1157,7 @@ public:
 
   /// Return true if lowering to a jump table is allowed.
   virtual bool areJTsAllowed(const Function *Fn) const {
-    if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
+    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
       return false;
 
     return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
@@ -1218,8 +1227,8 @@ public:
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
-    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
-           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
+    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
+           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
     unsigned Shift = 4 * ExtType;
     return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
   }
@@ -1243,7 +1252,7 @@ public:
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
-    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
+    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
            "Table isn't big enough!");
     return TruncStoreActions[ValI][MemI];
   }
@@ -1262,6 +1271,14 @@ public:
             getTruncStoreAction(ValVT, MemVT) == Custom);
   }
 
+  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
+                                    bool LegalOnly) const {
+    if (LegalOnly)
+      return isTruncStoreLegal(ValVT, MemVT);
+
+    return isTruncStoreLegalOrCustom(ValVT, MemVT);
+  }
+
   /// Return how the indexed load should be treated: either it is legal, needs
   /// to be promoted to a larger size, needs to be expanded to some other code
   /// sequence, or the target has a custom expander for it.
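
Both hooks added above are opt-in toggles with conservative defaults. A hedged sketch of how a target might override them (the class and the stated reasons are illustrative, not from this patch): preferZeroCompareBranch suits ISAs with fused compare-with-zero branches, while canCombineTruncStore can be tightened by a target that never wants combines to form its custom-lowered truncating stores:

    // Sketch only: hypothetical overrides inside a target's TargetLowering
    // subclass (e.g. the MyTargetLowering class sketched earlier).

    // Opt in to CodeGenPrepare's icmp-eq-zero heuristic, e.g. for an ISA
    // with beqz/bnez-style branch-on-zero instructions.
    bool preferZeroCompareBranch() const override { return true; }

    // Only let DAG combines form truncating stores that are fully legal,
    // even when a Custom lowering exists for them.
    bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                              bool LegalOnly) const override {
      (void)LegalOnly; // ignore the phase: always require full legality
      return isTruncStoreLegal(ValVT, MemVT);
    }
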
@@ -1318,6 +1335,10 @@ public:
             getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
   }
 
+  /// Returns true if the index type for a masked gather/scatter requires
+  /// extending
+  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
+
   // Returns true if VT is a legal index type for masked gathers/scatters
   // on this target
   virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; }
@@ -1464,7 +1485,12 @@ public:
   /// like i140, which are first promoted then expanded, it is the number of
   /// registers needed to hold all the bits of the original type. For an i140
   /// on a 32 bit machine this means 5 registers.
-  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
+  ///
+  /// RegisterVT may be passed as a way to override the default settings, for
+  /// instance with i128 inline assembly operands on SystemZ.
+  virtual unsigned
+  getNumRegisters(LLVMContext &Context, EVT VT,
+                  Optional<MVT> RegisterVT = None) const {
     if (VT.isSimple()) {
       assert((unsigned)VT.getSimpleVT().SimpleTy <
              array_lengthof(NumRegistersForVT));
@@ -1501,10 +1527,10 @@ public:
     return getNumRegisters(Context, VT);
   }
 
-  /// Certain targets have context senstive alignment requirements, where one
+  /// Certain targets have context sensitive alignment requirements, where one
   /// type has the alignment requirement of another type.
   virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
-                                              DataLayout DL) const {
+                                              const DataLayout &DL) const {
     return DL.getABITypeAlign(ArgTy);
   }
 
@@ -1607,7 +1633,7 @@ public:
   /// helps to ensure that such replacements don't generate code that causes an
   /// alignment error (trap) on the target machine.
   virtual bool allowsMisalignedMemoryAccesses(
-      EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool * /*Fast*/ = nullptr) const {
     return false;
@@ -1751,7 +1777,7 @@ public:
   /// returns the address of that location. Otherwise, returns nullptr.
   /// DEPRECATED: please override useLoadStackGuardNode and customize
   /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
-  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
+  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
 
   /// Inserts necessary declarations for SSP (stack protection) purpose.
   /// Should be used only when getIRStackGuard returns nullptr.
@@ -1774,13 +1800,19 @@ public:
   /// Should be used only when getIRStackGuard returns nullptr.
   virtual Function *getSSPStackGuardCheck(const Module &M) const;
 
+  /// \returns true if a constant G_UBFX is legal on the target.
+  virtual bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
+                                                     LLT Ty2) const {
+    return false;
+  }
+
 protected:
-  Value *getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
+  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                             bool UseTLS) const;
 
 public:
   /// Returns the target-specific address of the unsafe stack pointer.
-  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
+  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
 
   /// Returns the name of the symbol used to emit stack probes or the empty
   /// string if not applicable.
@@ -1814,8 +1846,8 @@ public:
   int InstructionOpcodeToISD(unsigned Opcode) const;
 
   /// Estimate the cost of type-legalization and the legalized type.
-  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
-                                              Type *Ty) const;
+  std::pair<InstructionCost, MVT> getTypeLegalizationCost(const DataLayout &DL,
+                                                          Type *Ty) const;
 
   /// @}
 
@@ -1854,14 +1886,14 @@ public:
   /// corresponding pointee type. This may entail some non-trivial operations to
   /// truncate or reconstruct types that will be illegal in the backend. See
   /// ARMISelLowering for an example implementation.
-  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
-                                AtomicOrdering Ord) const {
+  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
+                                Value *Addr, AtomicOrdering Ord) const {
     llvm_unreachable("Load linked unimplemented on this target");
   }
 
   /// Perform a store-conditional operation to Addr. Return the status of the
   /// store. This should be 0 if the store succeeded, non-zero otherwise.
-  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
                                       Value *Addr, AtomicOrdering Ord) const {
     llvm_unreachable("Store conditional unimplemented on this target");
   }
@@ -1869,7 +1901,7 @@ public:
   /// Perform a masked atomicrmw using a target-specific intrinsic. This
   /// represents the core LL/SC loop which will be lowered at a late stage by
   /// the backend.
-  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder,
+  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                               AtomicRMWInst *AI,
                                               Value *AlignedAddr, Value *Incr,
                                               Value *Mask, Value *ShiftAmt,
@@ -1881,7 +1913,7 @@ public:
   /// represents the core LL/SC loop which will be lowered at a late stage by
   /// the backend.
   virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
-      IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+      IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
       Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
     llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
   }
@@ -1919,22 +1951,13 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
-                                        AtomicOrdering Ord) const {
-    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
-      return Builder.CreateFence(Ord);
-    else
-      return nullptr;
-  }
+  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
+                                        Instruction *Inst,
+                                        AtomicOrdering Ord) const;
 
-  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
                                          Instruction *Inst,
-                                         AtomicOrdering Ord) const {
-    if (isAcquireOrStronger(Ord))
-      return Builder.CreateFence(Ord);
-    else
-      return nullptr;
-  }
+                                         AtomicOrdering Ord) const;
   /// @}
 
   // Emits code that executes when the comparison result in the ll/sc
@@ -1943,7 +1966,7 @@ public:
   // a dedicated instruction, if desired.
   // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
   // be unnecessarily held, except if clrex, inserted by this hook, is executed.
-  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
+  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
 
   /// Returns true if the given (atomic) store should be expanded by the
   /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
@@ -2347,8 +2370,9 @@ public:
   /// If the AM is not supported, it returns a negative value.
   /// TODO: Handle pre/postinc as well.
   /// TODO: Remove default argument
-  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
-                                   Type *Ty, unsigned AS = 0) const {
+  virtual InstructionCost getScalingFactorCost(const DataLayout &DL,
+                                               const AddrMode &AM, Type *Ty,
+                                               unsigned AS = 0) const {
     // Default: assume that any scaling factor used in a legal AM is free.
     if (isLegalAddressingMode(DL, AM, Ty, AS))
       return 0;
@@ -2454,6 +2478,8 @@ public:
     case ISD::UDIV:
     case ISD::SREM:
     case ISD::UREM:
+    case ISD::SSUBSAT:
+    case ISD::USUBSAT:
     case ISD::FSUB:
     case ISD::FDIV:
     case ISD::FREM:
@@ -2707,6 +2733,13 @@ public:
     return isOperationLegal(ISD::FMAD, N->getValueType(0));
   }
 
+  // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
+  // than FMUL and ADD is delegated to the machine combiner.
+  virtual bool generateFMAsInMachineCombiner(EVT VT,
+                                             CodeGenOpt::Level OptLevel) const {
+    return false;
+  }
+
   /// Return true if it's profitable to narrow operations of type VT1 to
   /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
   /// i32 to i16.
@@ -2785,6 +2818,10 @@ public:
     return false;
   }
 
+  /// Does this target require the clearing of high-order bits in a register
+  /// passed to the fp16 to fp conversion library function.
+  virtual bool shouldKeepZExtForFP16Conv() const { return false; }
+
   //===--------------------------------------------------------------------===//
   // Runtime Library hooks
   //
@@ -2908,9 +2945,9 @@ private:
 
   /// This indicates the default register class to use for each ValueType the
   /// target supports natively.
-  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
-  uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE];
-  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
+  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
+  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
 
   /// This indicates the "representative" register class to use for each
   /// ValueType the target supports natively. This information is used by the
@@ -2918,36 +2955,36 @@ private:
   /// register class is the largest legal super-reg register class of the
   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
   /// representative class would be GR32.
-  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE];
 
   /// This indicates the "cost" of the "representative" register class for each
   /// ValueType. The cost is used by the scheduler to approximate register
   /// pressure.
-  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
 
   /// For any value types we are promoting or expanding, this contains the value
   /// type that we are changing to. For Expanded types, this contains one step
   /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
   /// (e.g. i64 -> i16). For types natively supported by the system, this holds
   /// the same type (e.g. i32 -> i32).
-  MVT TransformToType[MVT::LAST_VALUETYPE];
+  MVT TransformToType[MVT::VALUETYPE_SIZE];
 
   /// For each operation and each value type, keep a LegalizeAction that
   /// indicates how instruction selection should deal with the operation. Most
   /// operations are Legal (aka, supported natively by the target), but
   /// operations that are not should be described. Note that operations on
   /// non-legal value types are not described here.
-  LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
+  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
 
   /// For each load extension type and each value type, keep a LegalizeAction
   /// that indicates how instruction selection should deal with a load of a
   /// specific value type and extension type. Uses 4-bits to store the action
   /// for each of the 4 load ext types.
-  uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
 
   /// For each value type pair keep a LegalizeAction that indicates whether a
   /// truncating store of a specific value type and truncating type is legal.
-  LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
 
   /// For each indexed mode and each value type, keep a quad of LegalizeAction
   /// that indicates how instruction selection should deal with the load /
@@ -2955,15 +2992,15 @@ private:
   ///
   /// The first dimension is the value_type for the reference. The second
   /// dimension represents the various modes for load store.
-  uint16_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
+  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
 
   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
   /// indicates how instruction selection should deal with the condition code.
   ///
   /// Because each CC action takes up 4 bits, we need to have the array size be
   /// large enough to fit all of the value types. This can be done by rounding
-  /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
-  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
+  /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
+  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
 
   ValueTypeActionImpl ValueTypeActions;
 
@@ -3454,6 +3491,13 @@ public:
       SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
       SelectionDAG &DAG, unsigned Depth) const;
 
+  /// Return true if this function can prove that \p Op is never poison
+  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
+  /// argument limits the check to the requested vector elements.
+  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
+      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
+      bool PoisonOnly, unsigned Depth) const;
+
   /// Tries to build a legal vector shuffle using the provided parameters
   /// or equivalent variations. The Mask argument may be modified as the
   /// function tries different variations.
@@ -3971,7 +4015,8 @@ public:
   /// must be passed in a block of consecutive registers.
   virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                          CallingConv::ID CallConv,
-                                                         bool isVarArg) const {
+                                                         bool isVarArg,
+                                                         const DataLayout &DL) const {
     return false;
   }
 
@@ -4189,6 +4234,10 @@ public:
   virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
     if (ConstraintCode == "m")
       return InlineAsm::Constraint_m;
+    if (ConstraintCode == "o")
+      return InlineAsm::Constraint_o;
+    if (ConstraintCode == "X")
+      return InlineAsm::Constraint_X;
     return InlineAsm::Constraint_Unknown;
   }
 
@@ -4335,6 +4384,13 @@ public:
   bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
                  SelectionDAG &DAG) const;
 
+  /// Expand shift-by-parts.
+  /// \param N Node to expand
+  /// \param Lo lower-output-part after conversion
+  /// \param Hi upper-output-part after conversion
+  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
+                        SelectionDAG &DAG) const;
+
   /// Expand float(f32) to SINT(i64) conversion
   /// \param N Node to expand
   /// \param Result output after conversion
@@ -4396,6 +4452,18 @@ public:
   bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG,
                  bool IsNegative = false) const;
 
+  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
+  /// scalar types. Returns SDValue() if expand fails.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
+
+  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
+  /// Returns SDValue() if expand fails.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
+
   /// Turn load of vector type into a load of the individual elements.
   /// \param LD load to expand
   /// \returns BUILD_VECTOR and TokenFactor nodes.
@@ -4434,6 +4502,14 @@ public:
   SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                   SDValue Index) const;
 
+  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
+  /// in memory for a vector of type \p VecVT starting at a base address of
+  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
+  /// returned pointer is unspecified, but the value returned will be such that
+  /// the entire subvector would be within the vector bounds.
+  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
+                                 EVT SubVecVT, SDValue Index) const;
+
   /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
   /// method accepts integers as its arguments.
   SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
@@ -4484,6 +4560,33 @@ public:
   /// Returns true if the expansion was successful.
   bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;
 
+  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
+  /// method accepts vectors as its arguments.
+  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;
+
+  /// Legalize a SETCC with given LHS and RHS and condition code CC on the
+  /// current target.
+  ///
+  /// If the SETCC has been legalized using AND / OR, then the legalized node
+  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
+  /// will be set to false.
+  ///
+  /// If the SETCC has been legalized by using getSetCCSwappedOperands(),
+  /// then the values of LHS and RHS will be swapped, CC will be set to the
+  /// new condition, and NeedInvert will be set to false.
+  ///
+  /// If the SETCC has been legalized using the inverse condcode, then LHS and
+  /// RHS will be unchanged, CC will be set to the inverted condcode, and
+  /// NeedInvert will be set to true. The caller must invert the result of the
+  /// SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to
+  /// swap the effect of a true/false result.
+  ///
+  /// \returns true if the SetCC has been legalized, false if it hasn't.
+  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
+                             SDValue &RHS, SDValue &CC, bool &NeedInvert,
+                             const SDLoc &dl, SDValue &Chain,
+                             bool IsSignaling = false) const;
+
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
   //
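
The three-way NeedInvert contract documented for LegalizeSetCCCondCode puts the inversion burden on the caller. A hedged sketch of the expected calling sequence (the surrounding variables are illustrative; the real callers live in the SelectionDAG legalizers):

    // Sketch only: LHS/RHS/CC are taken from the SETCC being legalized;
    // Chain matters for strict FP comparisons, dl is the node's debug loc.
    bool NeedInvert = false;
    if (TLI.LegalizeSetCCCondCode(DAG, VT, LHS, RHS, CC, NeedInvert, dl,
                                  Chain)) {
      // If CC was cleared, the AND/OR-expanded form is already in LHS.
      SDValue SetCC = CC.getNode()
                          ? DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS, CC)
                          : LHS;
      // Honor the contract: invert when the inverse condcode was used.
      if (NeedInvert)
        SetCC = DAG.getLogicalNOT(dl, SetCC, SetCC.getValueType());
      // ... replace the original node's result with SetCC ...
    }
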