Diffstat (limited to 'llvm/lib/Target/X86/X86ISelLowering.h')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h | 119
1 file changed, 94 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 362daa98e1f8..fe79fefeed63 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -87,6 +87,10 @@ namespace llvm {
COMI,
UCOMI,
+ // X86 compare intrinsics, similar to COMI.
+ COMX,
+ UCOMX,
+
/// X86 bit-test instructions.
BT,
@@ -340,6 +344,9 @@ namespace llvm {
// Vector FP round.
VFPROUND,
+ // Convert TWO packed single-precision vectors to one narrower packed vector.
+ VFPROUND2,
+ VFPROUND2_RND,
VFPROUND_RND,
VFPROUNDS,
VFPROUNDS_RND,
@@ -595,6 +602,51 @@ namespace llvm {
VPDPBSSD,
VPDPBSSDS,
+ VPDPWSUD,
+ VPDPWSUDS,
+ VPDPWUSD,
+ VPDPWUSDS,
+ VPDPWUUD,
+ VPDPWUUDS,
+
+ VMINMAX,
+ VMINMAX_SAE,
+ VMINMAXS,
+ VMINMAXS_SAE,
+
+ CVTP2IBS,
+ CVTP2IUBS,
+ CVTP2IBS_RND,
+ CVTP2IUBS_RND,
+ CVTTP2IBS,
+ CVTTP2IUBS,
+ CVTTP2IBS_SAE,
+ CVTTP2IUBS_SAE,
+
+ MPSADBW,
+
+ VCVT2PH2BF8,
+ VCVT2PH2BF8S,
+ VCVT2PH2HF8,
+ VCVT2PH2HF8S,
+ VCVTBIASPH2BF8,
+ VCVTBIASPH2BF8S,
+ VCVTBIASPH2HF8,
+ VCVTBIASPH2HF8S,
+ VCVTPH2BF8,
+ VCVTPH2BF8S,
+ VCVTPH2HF8,
+ VCVTPH2HF8S,
+ VMCVTBIASPH2BF8,
+ VMCVTBIASPH2BF8S,
+ VMCVTBIASPH2HF8,
+ VMCVTBIASPH2HF8S,
+ VMCVTPH2BF8,
+ VMCVTPH2BF8S,
+ VMCVTPH2HF8,
+ VMCVTPH2HF8S,
+ VCVTHF82PH,
+
// Compress and expand.
COMPRESS,
EXPAND,
@@ -626,6 +678,18 @@ namespace llvm {
CVTTP2UI,
CVTTP2SI_SAE,
CVTTP2UI_SAE,
+
+ // Vector float/double to signed/unsigned integer with truncation and
+ // saturation.
+ CVTTP2SIS,
+ CVTTP2UIS,
+ CVTTP2SIS_SAE,
+ CVTTP2UIS_SAE,
+ // Masked versions of above. Used for v2f64 to v4i32.
+ // SRC, PASSTHRU, MASK
+ MCVTTP2SIS,
+ MCVTTP2UIS,
+
// Scalar float/double to signed/unsigned integer with truncation.
CVTTS2SI,
CVTTS2UI,
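The operand order documented above (SRC, PASSTHRU, MASK) is easiest to see at a call site. A minimal sketch, assuming a lowering routine that already has Src, PassThru and Mask in hand (the variable names are illustrative, not from this patch):

    // Hypothetical use of the masked, saturating, truncating conversion node;
    // operands follow the documented order: SRC, PASSTHRU, MASK.
    SDValue Res = DAG.getNode(X86ISD::MCVTTP2SIS, dl, MVT::v4i32,
                              Src, PassThru, Mask);
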
@@ -636,6 +700,12 @@ namespace llvm {
CVTSI2P,
CVTUI2P,
+ // Scalar float/double to signed/unsigned integer with truncation and saturation.
+ CVTTS2SIS,
+ CVTTS2UIS,
+ CVTTS2SIS_SAE,
+ CVTTS2UIS_SAE,
+
// Masked versions of above. Used for v2f64->v4f32.
// SRC, PASSTHRU, MASK
MCVTP2SI,
@@ -646,17 +716,16 @@ namespace llvm {
MCVTUI2P,
// Vector float to bfloat16.
- // Convert TWO packed single data to one packed BF16 data
- CVTNE2PS2BF16,
// Convert packed single data to packed BF16 data
CVTNEPS2BF16,
// Masked version of above.
// SRC, PASSTHRU, MASK
MCVTNEPS2BF16,
- // Dot product of BF16 pairs to accumulated into
+ // Dot product of BF16/FP16 pairs accumulated into
// packed single precision.
DPBF16PS,
+ DPFP16PS,
// A stack checking function call. On Windows it's _chkstk call.
DYN_ALLOCA,
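As a reading aid for the updated comment: per f32 lane, the node takes two packed BF16/FP16 elements from each source, multiplies them pairwise, and adds the results to the accumulator. A scalar sketch of that semantics (an illustration of one lane, not the lowering code):

    // Reference semantics of one accumulator lane of DPBF16PS/DPFP16PS:
    // the old lane value plus the dot product of the two element pairs.
    float dp16ps_lane(float Acc, float A0, float A1, float B0, float B1) {
      return Acc + A0 * B0 + A1 * B1; // pairwise products, accumulated
    }
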
@@ -740,7 +809,8 @@ namespace llvm {
CTEST,
/// X86 strict FP compare instructions.
- STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
+ FIRST_STRICTFP_OPCODE,
+ STRICT_FCMP = FIRST_STRICTFP_OPCODE,
STRICT_FCMPS,
// Vector packed double/float comparison.
@@ -781,11 +851,14 @@ namespace llvm {
// Perform an FP80 add after changing precision control in FPCW.
STRICT_FP80_ADD,
- // WARNING: Only add nodes here if they are strict FP nodes. Non-memory and
- // non-strict FP nodes should be above FIRST_TARGET_STRICTFP_OPCODE.
+ /// Floating point max and min.
+ STRICT_FMAX,
+ STRICT_FMIN,
+ LAST_STRICTFP_OPCODE = STRICT_FMIN,
// Compare and swap.
- LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ FIRST_MEMORY_OPCODE,
+ LCMPXCHG_DAG = FIRST_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
LCMPXCHG16_SAVE_RBX_DAG,
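The new FIRST_STRICTFP_OPCODE/LAST_STRICTFP_OPCODE pair replaces the old "keep strict nodes last" warning with an explicit closed range, so a classifier can be a simple bounds check. A minimal sketch (the helper name is hypothetical):

    // Hypothetical range test enabled by the new markers.
    static bool isX86StrictFPOpcode(unsigned Opcode) {
      return Opcode >= X86ISD::FIRST_STRICTFP_OPCODE &&
             Opcode <= X86ISD::LAST_STRICTFP_OPCODE;
    }
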
@@ -835,6 +908,10 @@ namespace llvm {
// Load x87 FPU environment from memory.
FLDENVm,
+ // Custom handling for FP_TO_xINT_SAT
+ FP_TO_SINT_SAT,
+ FP_TO_UINT_SAT,
+
/// This instruction implements FP_TO_SINT with the
/// integer destination in memory and a FP reg source. This corresponds
/// to the X86::FIST*m instructions and the rounding mode change stuff. It
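The FP_TO_SINT_SAT/FP_TO_UINT_SAT opcodes back the custom handling the comment mentions. Presumably the corresponding generic ISD nodes are routed to them via setOperationAction in the target's constructor; a sketch under that assumption (the value type is illustrative):

    // Assumed wiring: request Custom lowering for the generic saturating
    // conversions so they can be mapped onto the new X86ISD nodes.
    setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Custom);
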
@@ -906,10 +983,7 @@ namespace llvm {
// Conditional load/store instructions
CLOAD,
CSTORE,
-
- // WARNING: Do not add anything in the end unless you want the node to
- // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
- // opcodes will be thought as target memory ops!
+ LAST_MEMORY_OPCODE = CSTORE,
};
} // end namespace X86ISD
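Likewise, FIRST_MEMORY_OPCODE/LAST_MEMORY_OPCODE turn the old trailing-position rule (everything from FIRST_TARGET_MEMORY_OPCODE onward is treated as a memory op) into an explicit range, so new non-memory nodes can be appended after CSTORE without being misclassified. A sketch of the resulting check (hypothetical helper):

    // Hypothetical classifier using the explicit memory-opcode range.
    static bool isX86MemoryOpcode(unsigned Opcode) {
      return Opcode >= X86ISD::FIRST_MEMORY_OPCODE &&
             Opcode <= X86ISD::LAST_MEMORY_OPCODE;
    }
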
@@ -1006,8 +1080,7 @@ namespace llvm {
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the
/// rest are at 4-byte boundaries.
- uint64_t getByValTypeAlignment(Type *Ty,
- const DataLayout &DL) const override;
+ Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override;
EVT getOptimalMemOpType(const MemOp &Op,
const AttributeList &FuncAttributes) const override;
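The signature change only swaps uint64_t for the type-safe Align wrapper; the documented rule stays the same. A much-simplified sketch of that rule (this is not the actual X86 body, and the 128-bit vector test stands in for the real scan for SSE vectors inside aggregates; MyTargetLowering is a placeholder class):

    // Simplified sketch of the documented byval rule: 16-byte alignment for
    // SSE-vector-bearing types, 4 bytes otherwise.
    Align MyTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
      if (Ty->isVectorTy() &&
          DL.getTypeSizeInBits(Ty).getFixedValue() >= 128)
        return Align(16);
      return Align(4);
    }
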
@@ -1335,10 +1408,6 @@ namespace llvm {
bool isLegalStoreImmediate(int64_t Imm) const override;
- /// This is used to enable splatted operand transforms for vector shifts
- /// and vector funnel shifts.
- bool isVectorShiftByScalarCheap(Type *Ty) const override;
-
/// Add x86-specific opcodes to the default list.
bool isBinOp(unsigned Opcode) const override;
@@ -1365,8 +1434,6 @@ namespace llvm {
bool isZExtFree(EVT VT1, EVT VT2) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
- bool shouldSinkOperands(Instruction *I,
- SmallVectorImpl<Use *> &Ops) const override;
bool shouldConvertPhiType(Type *From, Type *To) const override;
/// Return true if folding a vector load into ExtVal (a sign, zero, or any
@@ -1382,7 +1449,7 @@ namespace llvm {
/// Return true if it's profitable to narrow operations of type SrcVT to
/// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not
/// from i32 to i16.
- bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const override;
+ bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override;
bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
EVT VT) const override;
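Passing the SDNode through lets the hook consider the operation being narrowed, not just the types. A hypothetical override showing the shape of such a per-node decision (not the actual X86 logic; the load veto is invented for illustration):

    // Sketch: keep the documented type rule (i32->i8 profitable, i32->i16
    // not), but let the node veto narrowing for specific opcodes.
    bool MyTargetLowering::isNarrowingProfitable(SDNode *N, EVT SrcVT,
                                                 EVT DestVT) const {
      if (N->getOpcode() == ISD::LOAD) // hypothetical per-node exception
        return false;
      return SrcVT == MVT::i32 && DestVT == MVT::i8;
    }
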
@@ -1501,7 +1568,7 @@ namespace llvm {
/// returns the address of that location. Otherwise, returns nullptr.
Value *getIRStackGuard(IRBuilderBase &IRB) const override;
- bool useLoadStackGuardNode() const override;
+ bool useLoadStackGuardNode(const Module &M) const override;
bool useStackGuardXorFP() const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDagStackGuard(const Module &M) const override;
@@ -1537,6 +1604,10 @@ namespace llvm {
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const override;
+ bool functionArgumentNeedsConsecutiveRegisters(
+ Type *Ty, CallingConv::ID CallConv, bool isVarArg,
+ const DataLayout &DL) const override;
+
bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
bool supportSwiftError() const override;
@@ -1736,7 +1807,8 @@ namespace llvm {
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const override;
+ LLVMContext &Context,
+ const Type *RetTy) const override;
const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
@@ -1781,9 +1853,6 @@ namespace llvm {
MachineBasicBlock *EmitLoweredProbedAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredTLSAddr(MachineInstr &MI,
- MachineBasicBlock *BB) const;
-
MachineBasicBlock *EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const;