Diffstat (limited to 'llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h')
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h | 69
1 file changed, 54 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 9c37a4f6ec2d..9b364391f0fa 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -60,9 +60,6 @@ public:
: BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
- bool areInlineCompatible(const Function *Caller,
- const Function *Callee) const;
-
/// Return the cost of materializing an immediate for a value operand of
/// a store instruction.
InstructionCost getStoreImmCost(Type *VecTy, TTI::OperandValueInfo OpInfo,
@@ -146,9 +143,15 @@ public:
ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index,
VectorType *SubTp,
- ArrayRef<const Value *> Args = std::nullopt,
+ ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr);
+ InstructionCost getScalarizationOverhead(VectorType *Ty,
+ const APInt &DemandedElts,
+ bool Insert, bool Extract,
+ TTI::TargetCostKind CostKind,
+ ArrayRef<Value *> VL = {});
+
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind);
@@ -169,6 +172,8 @@ public:
TTI::TargetCostKind CostKind,
const Instruction *I);
+ InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
+
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
@@ -193,10 +198,12 @@ public:
TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr);
- InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
- CmpInst::Predicate VecPred,
- TTI::TargetCostKind CostKind,
- const Instruction *I = nullptr);
+ InstructionCost getCmpSelInstrCost(
+ unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
+ TTI::TargetCostKind CostKind,
+ TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
+ TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
+ const Instruction *I = nullptr);
InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
const Instruction *I = nullptr);
@@ -210,8 +217,7 @@ public:
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
- ArrayRef<const Value *> Args = std::nullopt,
- const Instruction *CxtI = nullptr);
+ ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
bool isElementTypeLegalForScalableVector(Type *Ty) const {
return TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty));
@@ -232,7 +238,6 @@ public:
return false;
return TLI->isLegalElementTypeForRVV(ElemType);
-
}
bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
@@ -252,6 +257,12 @@ public:
if (DataTypeVT.isFixedLengthVector() && !ST->useRVVForFixedLengthVectors())
return false;
+ // We also need to check that the vector of addresses is valid.
+ EVT PointerTypeVT = EVT(TLI->getPointerTy(DL));
+ if (DataTypeVT.isScalableVector() &&
+ !TLI->isLegalElementTypeForRVV(PointerTypeVT))
+ return false;
+
EVT ElemType = DataTypeVT.getScalarType();
if (!ST->enableUnalignedVectorMem() && Alignment < ElemType.getStoreSize())
return false;
@@ -281,6 +292,14 @@ public:
return TLI->isLegalStridedLoadStore(DataTypeVT, Alignment);
}
+ bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
+ Align Alignment, unsigned AddrSpace) {
+ return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
+ DL);
+ }
+
+ bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
+
bool isLegalMaskedCompressStore(Type *DataTy, Align Alignment);
bool isVScaleKnownToBeAPowerOfTwo() const {
@@ -310,6 +329,12 @@ public:
if (!TLI->isLegalElementTypeForRVV(TLI->getValueType(DL, Ty)))
return false;
+ // We can't promote f16/bf16 fadd reductions, and scalable vectors can't be
+ // expanded.
+ // TODO: Promote f16/bf16 fmin/fmax reductions
+ if (Ty->isBFloatTy() || (Ty->isHalfTy() && !ST->hasVInstructionsF16()))
+ return false;
+
switch (RdxDesc.getRecurrenceKind()) {
case RecurKind::Add:
case RecurKind::FAdd:
@@ -363,6 +388,9 @@ public:
llvm_unreachable("unknown register class");
}
+ TTI::AddressingModeKind getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const;
+
unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
if (Vector)
return RISCVRegisterClass::VRRC;
@@ -394,11 +422,22 @@ public:
bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
const TargetTransformInfo::LSRCost &C2);
- bool shouldFoldTerminatingConditionAfterLSR() const {
- return true;
- }
-
+ bool
+ shouldConsiderAddressTypePromotion(const Instruction &I,
+ bool &AllowPromotionWithoutCommonHeader);
std::optional<unsigned> getMinPageSize() const { return 4096; }
+ /// Return true if the (vector) instruction I will be lowered to an
+ /// instruction with a scalar splat operand for the given Operand number.
+ bool canSplatOperand(Instruction *I, int Operand) const;
+ /// Return true if a vector instruction will lower to a target instruction
+ /// able to splat the given operand.
+ bool canSplatOperand(unsigned Opcode, int Operand) const;
+
+ bool isProfitableToSinkOperands(Instruction *I,
+ SmallVectorImpl<Use *> &Ops) const;
+
+ TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
+ bool IsZeroCmp) const;
};
} // end namespace llvm
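
Note on the headline change: the getCmpSelInstrCost override now takes TTI::OperandValueInfo for both operands, so callers can tell the RISC-V cost model what they know about a compare/select's inputs. Below is a minimal caller sketch, not part of this patch: it goes through the generic TargetTransformInfo wrapper and assumes the matching TTI-level overload from the same change series; the helper name is hypothetical.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Hypothetical helper: price a vector integer equality compare whose
// right-hand operand is known to be a uniform (splatted) constant.
static InstructionCost costOfVecCmpEq(const TargetTransformInfo &TTI,
                                      Type *VecTy, Type *CondTy) {
  TargetTransformInfo::OperandValueInfo AnyOp = {
      TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};
  TargetTransformInfo::OperandValueInfo SplatConst = {
      TargetTransformInfo::OK_UniformConstantValue,
      TargetTransformInfo::OP_None};
  return TTI.getCmpSelInstrCost(Instruction::ICmp, VecTy, CondTy,
                                CmpInst::ICMP_EQ,
                                TargetTransformInfo::TCK_RecipThroughput,
                                AnyOp, SplatConst);
}

Marking the second operand OK_UniformConstantValue is the kind of hint that lets a backend cost the compare as a vector-scalar form (e.g. vmseq.vx or vmseq.vi on RISC-V) rather than a general vector-vector compare.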