diff options
Diffstat (limited to 'llvm/include/llvm/CodeGen/TargetLowering.h')
-rw-r--r-- | llvm/include/llvm/CodeGen/TargetLowering.h | 298 |
1 file changed, 162 insertions(+), 136 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index a58fca7e73f5..24daf70dc008 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -28,6 +28,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/ProfileSummaryInfo.h" #include "llvm/CodeGen/DAGCombine.h" #include "llvm/CodeGen/ISDOpcodes.h" #include "llvm/CodeGen/RuntimeLibcalls.h" @@ -53,6 +54,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MachineValueType.h" #include "llvm/Target/TargetMachine.h" +#include "llvm/Transforms/Utils/SizeOpts.h" #include <algorithm> #include <cassert> #include <climits> @@ -188,13 +190,14 @@ public: bool IsReturned : 1; bool IsSwiftSelf : 1; bool IsSwiftError : 1; + bool IsCFGuardTarget : 1; uint16_t Alignment = 0; Type *ByValType = nullptr; ArgListEntry() : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false), IsNest(false), IsByVal(false), IsInAlloca(false), IsReturned(false), - IsSwiftSelf(false), IsSwiftError(false) {} + IsSwiftSelf(false), IsSwiftError(false), IsCFGuardTarget(false) {} void setAttributes(const CallBase *Call, unsigned ArgIdx); @@ -222,12 +225,16 @@ public: llvm_unreachable("Invalid content kind"); } - /// NOTE: The TargetMachine owns TLOF. explicit TargetLoweringBase(const TargetMachine &TM); TargetLoweringBase(const TargetLoweringBase &) = delete; TargetLoweringBase &operator=(const TargetLoweringBase &) = delete; virtual ~TargetLoweringBase() = default; + /// Return true if the target support strict float operation + bool isStrictFPEnabled() const { + return IsStrictFPEnabled; + } + protected: /// Initialize all of the actions to default values. void initActions(); @@ -469,6 +476,10 @@ public: return false; } + /// Return true if instruction generated for equality comparison is folded + /// with instruction generated for signed comparison. 
+ virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; } + /// Return true if it is safe to transform an integer-domain bitwise operation /// into the equivalent floating-point operation. This should be set to true /// if the target has IEEE-754-compliant fabs/fneg operations for the input @@ -924,6 +935,8 @@ public: case ISD::SMULFIXSAT: case ISD::UMULFIX: case ISD::UMULFIXSAT: + case ISD::SDIVFIX: + case ISD::UDIVFIX: Supported = isSupportedFixedPointOperation(Op, VT, Scale); break; } @@ -937,38 +950,11 @@ public: unsigned EqOpc; switch (Op) { default: llvm_unreachable("Unexpected FP pseudo-opcode"); - case ISD::STRICT_FADD: EqOpc = ISD::FADD; break; - case ISD::STRICT_FSUB: EqOpc = ISD::FSUB; break; - case ISD::STRICT_FMUL: EqOpc = ISD::FMUL; break; - case ISD::STRICT_FDIV: EqOpc = ISD::FDIV; break; - case ISD::STRICT_FREM: EqOpc = ISD::FREM; break; - case ISD::STRICT_FSQRT: EqOpc = ISD::FSQRT; break; - case ISD::STRICT_FPOW: EqOpc = ISD::FPOW; break; - case ISD::STRICT_FPOWI: EqOpc = ISD::FPOWI; break; - case ISD::STRICT_FMA: EqOpc = ISD::FMA; break; - case ISD::STRICT_FSIN: EqOpc = ISD::FSIN; break; - case ISD::STRICT_FCOS: EqOpc = ISD::FCOS; break; - case ISD::STRICT_FEXP: EqOpc = ISD::FEXP; break; - case ISD::STRICT_FEXP2: EqOpc = ISD::FEXP2; break; - case ISD::STRICT_FLOG: EqOpc = ISD::FLOG; break; - case ISD::STRICT_FLOG10: EqOpc = ISD::FLOG10; break; - case ISD::STRICT_FLOG2: EqOpc = ISD::FLOG2; break; - case ISD::STRICT_LRINT: EqOpc = ISD::LRINT; break; - case ISD::STRICT_LLRINT: EqOpc = ISD::LLRINT; break; - case ISD::STRICT_FRINT: EqOpc = ISD::FRINT; break; - case ISD::STRICT_FNEARBYINT: EqOpc = ISD::FNEARBYINT; break; - case ISD::STRICT_FMAXNUM: EqOpc = ISD::FMAXNUM; break; - case ISD::STRICT_FMINNUM: EqOpc = ISD::FMINNUM; break; - case ISD::STRICT_FCEIL: EqOpc = ISD::FCEIL; break; - case ISD::STRICT_FFLOOR: EqOpc = ISD::FFLOOR; break; - case ISD::STRICT_LROUND: EqOpc = ISD::LROUND; break; - case ISD::STRICT_LLROUND: EqOpc = 
ISD::LLROUND; break; - case ISD::STRICT_FROUND: EqOpc = ISD::FROUND; break; - case ISD::STRICT_FTRUNC: EqOpc = ISD::FTRUNC; break; - case ISD::STRICT_FP_TO_SINT: EqOpc = ISD::FP_TO_SINT; break; - case ISD::STRICT_FP_TO_UINT: EqOpc = ISD::FP_TO_UINT; break; - case ISD::STRICT_FP_ROUND: EqOpc = ISD::FP_ROUND; break; - case ISD::STRICT_FP_EXTEND: EqOpc = ISD::FP_EXTEND; break; +#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ + case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break; +#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ + case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break; +#include "llvm/IR/ConstrainedOps.def" } return getOperationAction(EqOpc, VT); @@ -1029,24 +1015,8 @@ public: /// Return true if lowering to a jump table is suitable for a set of case /// clusters which may contain \p NumCases cases, \p Range range of values. virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, - uint64_t Range) const { - // FIXME: This function check the maximum table size and density, but the - // minimum size is not checked. It would be nice if the minimum size is - // also combined within this function. Currently, the minimum size check is - // performed in findJumpTable() in SelectionDAGBuiler and - // getEstimatedNumberOfCaseClusters() in BasicTTIImpl. - const bool OptForSize = SI->getParent()->getParent()->hasOptSize(); - const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize); - const unsigned MaxJumpTableSize = getMaximumJumpTableSize(); - - // Check whether the number of cases is small enough and - // the range is dense enough for a jump table. 
- if ((OptForSize || Range <= MaxJumpTableSize) && - (NumCases * 100 >= Range * MinDensity)) { - return true; - } - return false; - } + uint64_t Range, ProfileSummaryInfo *PSI, + BlockFrequencyInfo *BFI) const; /// Return true if lowering to a bit test is suitable for a set of case /// clusters which contains \p NumDests unique destinations, \p Low and @@ -1143,12 +1113,8 @@ public: /// Return how the indexed load should be treated: either it is legal, needs /// to be promoted to a larger size, needs to be expanded to some other code /// sequence, or the target has a custom expander for it. - LegalizeAction - getIndexedLoadAction(unsigned IdxMode, MVT VT) const { - assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() && - "Table isn't big enough!"); - unsigned Ty = (unsigned)VT.SimpleTy; - return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4); + LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const { + return getIndexedModeAction(IdxMode, VT, IMAB_Load); } /// Return true if the specified indexed load is legal on this target. @@ -1161,12 +1127,8 @@ public: /// Return how the indexed store should be treated: either it is legal, needs /// to be promoted to a larger size, needs to be expanded to some other code /// sequence, or the target has a custom expander for it. - LegalizeAction - getIndexedStoreAction(unsigned IdxMode, MVT VT) const { - assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() && - "Table isn't big enough!"); - unsigned Ty = (unsigned)VT.SimpleTy; - return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f); + LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const { + return getIndexedModeAction(IdxMode, VT, IMAB_Store); } /// Return true if the specified indexed load is legal on this target. 
@@ -1176,6 +1138,34 @@ public: getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); } + /// Return how the indexed load should be treated: either it is legal, needs + /// to be promoted to a larger size, needs to be expanded to some other code + /// sequence, or the target has a custom expander for it. + LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const { + return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad); + } + + /// Return true if the specified indexed load is legal on this target. + bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const { + return VT.isSimple() && + (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal || + getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom); + } + + /// Return how the indexed store should be treated: either it is legal, needs + /// to be promoted to a larger size, needs to be expanded to some other code + /// sequence, or the target has a custom expander for it. + LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const { + return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore); + } + + /// Return true if the specified indexed load is legal on this target. + bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const { + return VT.isSimple() && + (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || + getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); + } + /// Return how the condition code should be treated: either it is legal, needs /// to be expanded to some other code sequence, or the target has a custom /// expander for it. @@ -1265,7 +1255,7 @@ public: Elm = PointerTy.getTypeForEVT(Ty->getContext()); } return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), - VTy->getNumElements()); + VTy->getElementCount()); } return getValueType(DL, Ty, AllowUnknown); @@ -1550,16 +1540,6 @@ public: /// have to be legal as the hook is used before type legalization. 
virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; } - /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp. - bool usesUnderscoreSetJmp() const { - return UseUnderscoreSetJmp; - } - - /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp. - bool usesUnderscoreLongJmp() const { - return UseUnderscoreLongJmp; - } - /// Return lower limit for number of blocks in a jump table. virtual unsigned getMinimumJumpTableEntries() const; @@ -1960,18 +1940,6 @@ protected: SchedPreferenceInfo = Pref; } - /// Indicate whether this target prefers to use _setjmp to implement - /// llvm.setjmp or the version without _. Defaults to false. - void setUseUnderscoreSetJmp(bool Val) { - UseUnderscoreSetJmp = Val; - } - - /// Indicate whether this target prefers to use _longjmp to implement - /// llvm.longjmp or the version without _. Defaults to false. - void setUseUnderscoreLongJmp(bool Val) { - UseUnderscoreLongJmp = Val; - } - /// Indicate the minimum number of blocks to generate jump tables. void setMinimumJumpTableEntries(unsigned Val); @@ -2063,13 +2031,8 @@ protected: /// /// NOTE: All indexed mode loads are initialized to Expand in /// TargetLowering.cpp - void setIndexedLoadAction(unsigned IdxMode, MVT VT, - LegalizeAction Action) { - assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE && - (unsigned)Action < 0xf && "Table isn't big enough!"); - // Load action are kept in the upper half. 
- IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0; - IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4; + void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { + setIndexedModeAction(IdxMode, VT, IMAB_Load, Action); } /// Indicate that the specified indexed store does or does not work with the @@ -2077,13 +2040,28 @@ protected: /// /// NOTE: All indexed mode stores are initialized to Expand in /// TargetLowering.cpp - void setIndexedStoreAction(unsigned IdxMode, MVT VT, - LegalizeAction Action) { - assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE && - (unsigned)Action < 0xf && "Table isn't big enough!"); - // Store action are kept in the lower half. - IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f; - IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action); + void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action) { + setIndexedModeAction(IdxMode, VT, IMAB_Store, Action); + } + + /// Indicate that the specified indexed masked load does or does not work with + /// the specified type and indicate what to do about it. + /// + /// NOTE: All indexed mode masked loads are initialized to Expand in + /// TargetLowering.cpp + void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action); + } + + /// Indicate that the specified indexed masked store does or does not work + /// with the specified type and indicate what to do about it. 
+ /// + /// NOTE: All indexed mode masked stores are initialized to Expand in + /// TargetLowering.cpp + void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, + LegalizeAction Action) { + setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action); } /// Indicate that the specified condition code is or isn't supported on the @@ -2504,7 +2482,8 @@ public: /// Return true if an fpext operation input to an \p Opcode operation is free /// (for instance, because half-precision floating-point numbers are /// implicitly extended to float-precision) for an FMA instruction. - virtual bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const { + virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, + EVT DestVT, EVT SrcVT) const { assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && "invalid fpext types"); return isFPExtFree(DestVT, SrcVT); @@ -2536,10 +2515,24 @@ public: /// not legal, but should return true if those types will eventually legalize /// to types that support FMAs. After legalization, it will only be called on /// types that support FMAs (via Legal or Custom actions) - virtual bool isFMAFasterThanFMulAndFAdd(EVT) const { + virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, + EVT) const { return false; } + /// IR version + virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const { + return false; + } + + /// Returns true if the FADD or FSUB node passed could legally be combined with + /// an fmul to form an ISD::FMAD. + virtual bool isFMADLegalForFAddFSub(const SelectionDAG &DAG, + const SDNode *N) const { + assert(N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB); + return isOperationLegal(ISD::FMAD, N->getValueType(0)); + } + /// Return true if it's profitable to narrow operations of type VT1 to /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from /// i32 to i16. @@ -2608,10 +2601,10 @@ public: // same blocks of its users. 
virtual bool shouldConsiderGEPOffsetSplit() const { return false; } - // Return the shift amount threshold for profitable transforms into shifts. - // Transforms creating shifts above the returned value will be avoided. - virtual unsigned getShiftAmountThreshold(EVT VT) const { - return VT.getScalarSizeInBits(); + /// Return true if creating a shift of the type by the given + /// amount is not profitable. + virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const { + return false; } //===--------------------------------------------------------------------===// @@ -2683,16 +2676,6 @@ private: /// predication. bool JumpIsExpensive; - /// This target prefers to use _setjmp to implement llvm.setjmp. - /// - /// Defaults to false. - bool UseUnderscoreSetJmp; - - /// This target prefers to use _longjmp to implement llvm.longjmp. - /// - /// Defaults to false. - bool UseUnderscoreLongJmp; - /// Information about the contents of the high-bits in boolean values held in /// a type wider than i1. See getBooleanContents. BooleanContent BooleanContents; @@ -2741,7 +2724,7 @@ private: /// This indicates the default register class to use for each ValueType the /// target supports natively. const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; - unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; + uint16_t NumRegistersForVT[MVT::LAST_VALUETYPE]; MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; /// This indicates the "representative" register class to use for each @@ -2781,13 +2764,13 @@ private: /// truncating store of a specific value type and truncating type is legal. LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]; - /// For each indexed mode and each value type, keep a pair of LegalizeAction + /// For each indexed mode and each value type, keep a quad of LegalizeAction /// that indicates how instruction selection should deal with the load / - /// store. + /// store / maskedload / maskedstore. 
/// /// The first dimension is the value_type for the reference. The second /// dimension represents the various modes for load store. - uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE]; + uint16_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE]; /// For each condition code (ISD::CondCode) keep a LegalizeAction that /// indicates how instruction selection should deal with the condition code. @@ -2830,6 +2813,32 @@ private: /// Set default libcall names and calling conventions. void InitLibcalls(const Triple &TT); + /// The bits of IndexedModeActions used to store the legalisation actions + /// We store the data as | ML | MS | L | S | each taking 4 bits. + enum IndexedModeActionsBits { + IMAB_Store = 0, + IMAB_Load = 4, + IMAB_MaskedStore = 8, + IMAB_MaskedLoad = 12 + }; + + void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift, + LegalizeAction Action) { + assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE && + (unsigned)Action < 0xf && "Table isn't big enough!"); + unsigned Ty = (unsigned)VT.SimpleTy; + IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift); + IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift; + } + + LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT, + unsigned Shift) const { + assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() && + "Table isn't big enough!"); + unsigned Ty = (unsigned)VT.SimpleTy; + return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf); + } + protected: /// Return true if the extension represented by \p I is free. /// \pre \p I is a sign, zero, or fp extension and @@ -2932,6 +2941,8 @@ protected: /// details. 
MachineBasicBlock *emitXRayTypedEvent(MachineInstr &MI, MachineBasicBlock *MBB) const; + + bool IsStrictFPEnabled; }; /// This class defines information used to lower LLVM code to legal SelectionDAG @@ -2947,7 +2958,6 @@ public: TargetLowering(const TargetLowering &) = delete; TargetLowering &operator=(const TargetLowering &) = delete; - /// NOTE: The TargetMachine owns TLOF. explicit TargetLowering(const TargetMachine &TM); bool isPositionIndependent() const; @@ -3024,12 +3034,19 @@ public: const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const; + void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, + SDValue &NewRHS, ISD::CondCode &CCCode, + const SDLoc &DL, const SDValue OldLHS, + const SDValue OldRHS, SDValue &Chain, + bool IsSignaling = false) const; + /// Returns a pair of (return value, chain). /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC. std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef<SDValue> Ops, MakeLibCallOptions CallOptions, - const SDLoc &dl) const; + const SDLoc &dl, + SDValue Chain = SDValue()) const; /// Check whether parameters to a call that are passed in callee saved /// registers are the same as from the calling function. This needs to be @@ -3262,9 +3279,7 @@ public: bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; } bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; } - bool isAfterLegalizeDAG() const { - return Level == AfterLegalizeDAG; - } + bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; } CombineLevel getDAGCombineLevel() { return Level; } bool isCalledByLegalizer() const { return CalledByLegalizer; } @@ -3695,7 +3710,7 @@ public: /// Return the register ID of the name passed in. Used by named register /// global variables extension. There is no target-independent behaviour /// so the default action is to bail. 
- virtual Register getRegisterByName(const char* RegName, EVT VT, + virtual Register getRegisterByName(const char* RegName, LLT Ty, const MachineFunction &MF) const { report_fatal_error("Named registers not implemented for this target"); } @@ -3758,7 +3773,7 @@ public: /// Should SelectionDAG lower an atomic store of the given kind as a normal /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to /// eventually migrate all targets to the using StoreSDNodes, but porting is - /// being done target at a time. + /// being done target at a time. virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const { assert(SI.isAtomic() && "violated precondition"); return false; @@ -3940,9 +3955,7 @@ public: StringRef Constraint, MVT VT) const; virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const { - if (ConstraintCode == "i") - return InlineAsm::Constraint_i; - else if (ConstraintCode == "m") + if (ConstraintCode == "m") return InlineAsm::Constraint_m; return InlineAsm::Constraint_Unknown; } @@ -4082,14 +4095,18 @@ public: /// Expand float to UINT conversion /// \param N Node to expand /// \param Result output after conversion + /// \param Chain output chain after conversion /// \returns True, if the expansion was successful, false otherwise - bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const; + bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, + SelectionDAG &DAG) const; /// Expand UINT(i64) to double(f64) conversion /// \param N Node to expand /// \param Result output after conversion + /// \param Chain output chain after conversion /// \returns True, if the expansion was successful, false otherwise - bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; + bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, + SelectionDAG &DAG) const; /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs. 
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const; @@ -4125,12 +4142,13 @@ public: /// Turn load of vector type into a load of the individual elements. /// \param LD load to expand - /// \returns MERGE_VALUEs of the scalar loads with their chains. - SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const; + /// \returns BUILD_VECTOR and TokenFactor nodes. + std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD, + SelectionDAG &DAG) const; // Turn a store of a vector type into stores of the individual elements. /// \param ST Store with a vector value type - /// \returns MERGE_VALUs of the individual store chains. + /// \returns TokenFactor of the individual store chains. SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const; /// Expands an unaligned load to 2 half-size loads for an integer, and @@ -4168,6 +4186,14 @@ public: /// method accepts integers as its arguments. SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const; + /// Method for building the DAG expansion of ISD::[US]DIVFIX. This + /// method accepts integers as its arguments. + /// Note: This method may fail if the division could not be performed + /// within the type. Clients must retry with a wider type if this happens. + SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, + SDValue LHS, SDValue RHS, + unsigned Scale, SelectionDAG &DAG) const; + /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion /// always suceeds and populates the Result and Overflow arguments. void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, |