aboutsummaryrefslogtreecommitdiff
path: root/lib/Target/X86/X86ISelLowering.h
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/X86/X86ISelLowering.h')
-rw-r--r--lib/Target/X86/X86ISelLowering.h306
1 files changed, 184 insertions, 122 deletions
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b67958a9c498..d826f1ec3e05 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -75,7 +75,7 @@ namespace llvm {
///
CALL,
- /// This operation implements the lowering for readcyclecounter
+ /// This operation implements the lowering for readcyclecounter.
RDTSC_DAG,
/// X86 Read Time-Stamp Counter and Processor ID.
@@ -106,10 +106,6 @@ namespace llvm {
/// 0s or 1s. Generally DTRT for C/C++ with NaNs.
FSETCC,
- /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
- /// result in an integer GPR. Needs masking for scalar result.
- FGETSIGNx86,
-
/// X86 conditional moves. Operand 0 and operand 1 are the two values
/// to select from. Operand 2 is the condition code, and operand 3 is the
/// flag operand produced by a CMP or TEST instruction. It also writes a
@@ -191,9 +187,6 @@ namespace llvm {
/// Bitwise Logical AND NOT of Packed FP values.
ANDNP,
- /// Copy integer sign.
- PSIGN,
-
/// Blend where the selector is an immediate.
BLENDI,
@@ -214,30 +207,31 @@ namespace llvm {
FMIN_RND,
FSQRT_RND,
- // FP vector get exponent
+ // FP vector get exponent.
FGETEXP_RND,
- // Extract Normalized Mantissas
+ // Extract Normalized Mantissas.
VGETMANT,
- // FP Scale
+ // FP Scale.
SCALEF,
+ SCALEFS,
+
// Integer add/sub with unsigned saturation.
ADDUS,
SUBUS,
+
// Integer add/sub with signed saturation.
ADDS,
SUBS,
- // Unsigned Integer average
+
+ // Unsigned Integer average.
AVG,
- /// Integer horizontal add.
- HADD,
- /// Integer horizontal sub.
+ /// Integer horizontal add/sub.
+ HADD,
HSUB,
- /// Floating point horizontal add.
+ /// Floating point horizontal add/sub.
FHADD,
-
- /// Floating point horizontal sub.
FHSUB,
// Integer absolute value
@@ -256,7 +250,8 @@ namespace llvm {
/// Note that these typically require refinement
/// in order to obtain suitable precision.
FRSQRT, FRCP,
-
+ FRSQRTS, FRCPS,
+
// Thread Local Storage.
TLSADDR,
@@ -277,6 +272,9 @@ namespace llvm {
// SjLj exception handling longjmp.
EH_SJLJ_LONGJMP,
+ // SjLj exception handling dispatch.
+ EH_SJLJ_SETUP_DISPATCH,
+
/// Tail call return. See X86TargetLowering::LowerCall for
/// the list of operands.
TC_RETURN,
@@ -286,7 +284,6 @@ namespace llvm {
// Vector integer zero-extend.
VZEXT,
-
// Vector integer signed-extend.
VSEXT,
@@ -313,6 +310,11 @@ namespace llvm {
// Vector shift elements
VSHL, VSRL, VSRA,
+ // Vector variable shift right arithmetic.
+ // Unlike ISD::SRA, if the shift count is greater than the element size,
+ // the sign bit is used to fill the destination data element.
+ VSRAV,
+
// Vector shift elements by immediate
VSHLI, VSRLI, VSRAI,
@@ -327,6 +329,8 @@ namespace llvm {
// Vector integer comparisons, the result is in a mask vector.
PCMPEQM, PCMPGTM,
+ MULTISHIFT,
+
/// Vector comparison generating mask bits for fp and
/// integer signed and unsigned data types.
CMPM,
@@ -338,11 +342,13 @@ namespace llvm {
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BEXTR, // Bit field extract
+ // Bit field extract.
+ BEXTR,
- UMUL, // LOW, HI, FLAGS = umul LHS, RHS
+ // LOW, HI, FLAGS = umul LHS, RHS.
+ UMUL,
- // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS
+ // 8-bit SMUL/UMUL - AX, FLAGS = smul8/umul8 AL, RHS.
SMUL8, UMUL8,
// 8-bit divrem that zero-extend the high result (AH).
@@ -352,6 +358,9 @@ namespace llvm {
// X86-specific multiply by immediate.
MUL_IMM,
+ // Vector sign bit extraction.
+ MOVMSK,
+
// Vector bitwise comparisons.
PTEST,
@@ -362,22 +371,23 @@ namespace llvm {
TESTM,
TESTNM,
- // OR/AND test for masks
+ // OR/AND test for masks.
KORTEST,
KTEST,
// Several flavors of instructions with vector shuffle behaviors.
+ // Saturated signed/unsigned packing.
PACKSS,
PACKUS,
- // Intra-lane alignr
+ // Intra-lane alignr.
PALIGNR,
- // AVX512 inter-lane alignr
+ // AVX512 inter-lane alignr.
VALIGN,
PSHUFD,
PSHUFHW,
PSHUFLW,
SHUFP,
- //Shuffle Packed Values at 128-bit granularity
+ // Shuffle Packed Values at 128-bit granularity.
SHUF128,
MOVDDUP,
MOVSHDUP,
@@ -393,61 +403,82 @@ namespace llvm {
UNPCKH,
VPERMILPV,
VPERMILPI,
+ VPERMI,
+ VPERM2X128,
+
+ // Variable Permute (VPERM).
+ // Res = VPERMV MaskV, V0
VPERMV,
+
+ // 3-op Variable Permute (VPERMT2).
+ // Res = VPERMV3 V0, MaskV, V1
VPERMV3,
+
+ // 3-op Variable Permute overwriting the index (VPERMI2).
+ // Res = VPERMIV3 V0, MaskV, V1
VPERMIV3,
- VPERMI,
- VPERM2X128,
- // Bitwise ternary logic
+
+ // Bitwise ternary logic.
VPTERNLOG,
- // Fix Up Special Packed Float32/64 values
+ // Fix Up Special Packed Float32/64 values.
VFIXUPIMM,
- // Range Restriction Calculation For Packed Pairs of Float32/64 values
+ VFIXUPIMMS,
+ // Range Restriction Calculation For Packed Pairs of Float32/64 values.
VRANGE,
- // Reduce - Perform Reduction Transformation on scalar\packed FP
+ // Reduce - Perform Reduction Transformation on scalar\packed FP.
VREDUCE,
- // RndScale - Round FP Values To Include A Given Number Of Fraction Bits
+ // RndScale - Round FP Values To Include A Given Number Of Fraction Bits.
VRNDSCALE,
- // VFPCLASS - Tests Types Of a FP Values for packed types.
- VFPCLASS,
- // VFPCLASSS - Tests Types Of a FP Values for scalar types.
- VFPCLASSS,
- // Broadcast scalar to vector
+ // Tests Types Of a FP Values for packed types.
+ VFPCLASS,
+ // Tests Types Of a FP Values for scalar types.
+ VFPCLASSS,
+
+ // Broadcast scalar to vector.
VBROADCAST,
- // Broadcast mask to vector
+ // Broadcast mask to vector.
VBROADCASTM,
- // Broadcast subvector to vector
+ // Broadcast subvector to vector.
SUBV_BROADCAST,
- // Insert/Extract vector element
+
+ // Insert/Extract vector element.
VINSERT,
VEXTRACT,
/// SSE4A Extraction and Insertion.
EXTRQI, INSERTQI,
- // XOP variable/immediate rotations
+ // XOP variable/immediate rotations.
VPROT, VPROTI,
- // XOP arithmetic/logical shifts
+ // XOP arithmetic/logical shifts.
VPSHA, VPSHL,
- // XOP signed/unsigned integer comparisons
+ // XOP signed/unsigned integer comparisons.
VPCOM, VPCOMU,
+ // XOP packed permute bytes.
+ VPPERM,
+ // XOP two source permutation.
+ VPERMIL2,
- // Vector multiply packed unsigned doubleword integers
+ // Vector multiply packed unsigned doubleword integers.
PMULUDQ,
- // Vector multiply packed signed doubleword integers
+ // Vector multiply packed signed doubleword integers.
PMULDQ,
- // Vector Multiply Packed UnsignedIntegers with Round and Scale
+ // Vector Multiply Packed Unsigned Integers with Round and Scale.
MULHRS,
- // Multiply and Add Packed Integers
+
+ // Multiply and Add Packed Integers.
VPMADDUBSW, VPMADDWD,
- // FMA nodes
+ VPMADD52L, VPMADD52H,
+
+ // FMA nodes.
FMADD,
FNMADD,
FMSUB,
FNMSUB,
FMADDSUB,
FMSUBADD,
- // FMA with rounding mode
+
+ // FMA with rounding mode.
FMADD_RND,
FNMADD_RND,
FMSUB_RND,
@@ -455,17 +486,20 @@ namespace llvm {
FMADDSUB_RND,
FMSUBADD_RND,
- // Compress and expand
+ // Compress and expand.
COMPRESS,
EXPAND,
- //Convert Unsigned/Integer to Scalar Floating-Point Value
- //with rounding mode
+ // Convert Unsigned/Integer to Scalar Floating-Point Value
+ // with rounding mode.
SINT_TO_FP_RND,
UINT_TO_FP_RND,
// Vector float/double to signed/unsigned integer.
FP_TO_SINT_RND, FP_TO_UINT_RND,
+ // Scalar float/double to signed/unsigned integer.
+ SCALAR_FP_TO_SINT_RND, SCALAR_FP_TO_UINT_RND,
+
// Save xmm argument registers to the stack, according to %al. An operator
// is needed so that this can be expanded with control flow.
VASTART_SAVE_XMM_REGS,
@@ -478,11 +512,9 @@ namespace llvm {
// falls back to heap allocation if not.
SEG_ALLOCA,
- // Memory barrier
+ // Memory barriers.
MEMBARRIER,
MFENCE,
- SFENCE,
- LFENCE,
// Store FP status word into i16 register.
FNSTSW16r,
@@ -497,19 +529,26 @@ namespace llvm {
// indicate whether it is valid in CF.
RDSEED,
+ // SSE42 string comparisons.
PCMPISTRI,
PCMPESTRI,
// Test if in transactional execution.
XTEST,
- // ERI instructions
+ // ERI instructions.
RSQRT28, RCP28, EXP2,
// Compare and swap.
LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
+ LCMPXCHG8_SAVE_EBX_DAG,
+ LCMPXCHG16_SAVE_RBX_DAG,
+
+ /// LOCK-prefixed arithmetic read-modify-write instructions.
+ /// EFLAGS, OUTCHAIN = LADD(INCHAIN, PTR, RHS)
+ LADD, LSUB, LOR, LXOR, LAND,
// Load, scalar_to_vector, and zero extend.
VZEXT_LOAD,
@@ -551,10 +590,10 @@ namespace llvm {
VAARG_64
// WARNING: Do not add anything in the end unless you want the node to
- // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
- // thought as target memory ops!
+ // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
+ // opcodes will be thought as target memory ops!
};
- }
+ } // end namespace X86ISD
/// Define some predicates that are used for node matching.
namespace X86 {
@@ -606,13 +645,12 @@ namespace llvm {
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement = true);
-
/// Determines whether the callee is required to pop its
/// own arguments. Callee pop is necessary to support tail calls.
bool isCalleePop(CallingConv::ID CallingConv,
- bool is64Bit, bool IsVarArg, bool TailCallOpt);
+ bool is64Bit, bool IsVarArg, bool GuaranteeTCO);
- }
+ } // end namespace X86
//===--------------------------------------------------------------------===//
// X86 Implementation of the TargetLowering interface
@@ -679,13 +717,20 @@ namespace llvm {
///
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ /// Places new result values for the node in Results (their number
+ /// and types must exactly match those of the original return values of
+ /// the node), or leaves Results empty, which indicates that the node is not
+ /// to be custom lowered after all.
+ void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const override;
+
/// Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
-
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// Return true if the target has native support for
@@ -705,9 +750,8 @@ namespace llvm {
bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const override;
MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const override;
-
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
/// This method returns the name of a target specific DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -716,6 +760,12 @@ namespace llvm {
bool isCheapToSpeculateCtlz() const override;
+ bool hasBitPreservingFPLogic(EVT VT) const override {
+ return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
+ }
+
+ bool hasAndNotCompare(SDValue Y) const override;
+
/// Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
@@ -914,16 +964,21 @@ namespace llvm {
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
+ virtual bool needsFixedCatchObjects() const override;
+
/// This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const override;
- /// Return true if the target stores stack protector cookies at a fixed
- /// offset in some non-standard address space, and populates the address
- /// space and offset as appropriate.
- bool getStackCookieLocation(unsigned &AddressSpace,
- unsigned &Offset) const override;
+ /// If the target has a standard location for the stack protector cookie,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ Value *getIRStackGuard(IRBuilder<> &IRB) const override;
+
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+ Value *getSDagStackGuard(const Module &M) const override;
+ Value *getSSPStackGuardCheck(const Module &M) const override;
/// Return true if the target stores SafeStack pointer at a fixed offset in
/// some non-standard address space, and populates the address space and
@@ -935,21 +990,24 @@ namespace llvm {
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
- bool useLoadStackGuardNode() const override;
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool supportSwiftError() const override {
+ return true;
+ }
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const override;
private:
- /// Keep a pointer to the X86Subtarget around so that we can
+ /// Keep a reference to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
- const X86Subtarget *Subtarget;
+ const X86Subtarget &Subtarget;
/// Select between SSE or x87 floating point ops.
/// When SSE is available, use it for f32 operations.
@@ -969,16 +1027,15 @@ namespace llvm {
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerMemArgument(SDValue Chain,
- CallingConv::ID CallConv,
+ SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
- SDLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA, MachineFrameInfo *MFI,
- unsigned i) const;
+ const SDLoc &dl, SelectionDAG &DAG,
+ const CCValAssign &VA, MachineFrameInfo *MFI,
+ unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- SDLoc dl, SelectionDAG &DAG,
+ const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const;
@@ -997,12 +1054,15 @@ namespace llvm {
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
- SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, SDLoc dl) const;
+ SDValue Chain, bool IsTailCall,
+ bool Is64Bit, int FPDiff,
+ const SDLoc &dl) const;
unsigned GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG &DAG) const;
+ unsigned getAddressSpace(void) const;
+
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool isSigned,
bool isReplace) const;
@@ -1017,7 +1077,7 @@ namespace llvm {
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
+ SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &dl,
int64_t Offset, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -1030,8 +1090,8 @@ namespace llvm {
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
+ SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
@@ -1046,6 +1106,7 @@ namespace llvm {
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
@@ -1053,19 +1114,17 @@ namespace llvm {
SDValue LowerGC_TRANSITION_END(SDValue Op, SelectionDAG &DAG) const;
SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const override;
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const override;
+ const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
@@ -1080,8 +1139,8 @@ namespace llvm {
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
- ISD::NodeType ExtendKind) const override;
+ EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType ExtendKind) const override;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
@@ -1101,57 +1160,60 @@ namespace llvm {
bool needsCmpXchgNb(Type *MemType) const;
+ void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
+ MachineBasicBlock *DispatchBB, int FI) const;
+
// Utility function to emit the low-level va_arg code for X86-64.
- MachineBasicBlock *EmitVAARG64WithCustomInserter(
- MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *
+ EmitVAARG64WithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
/// Utility function to emit the xmm reg save portion of va_start.
- MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
- MachineInstr *BInstr,
- MachineBasicBlock *BB) const;
+ MachineBasicBlock *
+ EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
+ MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr *I,
+ MachineBasicBlock *EmitLoweredAtomicFP(MachineInstr &I,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
- MachineBasicBlock *BB) const;
-
- MachineBasicBlock *EmitLoweredCatchRet(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredCatchPad(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *emitFMA3Instr(MachineInstr &MI,
MachineBasicBlock *MBB) const;
+ MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const;
/// Convert a comparison if required by the subtarget.
@@ -1173,7 +1235,7 @@ namespace llvm {
namespace X86 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
- }
-}
+ } // end namespace X86
+} // end namespace llvm
-#endif // X86ISELLOWERING_H
+#endif // LLVM_LIB_TARGET_X86_X86ISELLOWERING_H