Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp')
-rw-r--r-- | contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp | 626
1 file changed, 370 insertions, 256 deletions
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 28c8bd0a7ded..6c58e21b28bb 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -15,7 +15,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -29,6 +28,7 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/StackMaps.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -45,18 +45,17 @@
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
-#include "llvm/Support/BranchProbability.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MachineValueType.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/Triple.h"
 #include "llvm/Transforms/Utils/SizeOpts.h"
 #include <algorithm>
 #include <cassert>
-#include <cstddef>
 #include <cstdint>
 #include <cstring>
 #include <iterator>
@@ -114,17 +113,6 @@ static bool darwinHasSinCos(const Triple &TT) {
   return true;
 }
 
-// Although this default value is arbitrary, it is not random. It is assumed
-// that a condition that evaluates the same way by a higher percentage than this
-// is best represented as control flow. Therefore, the default value N should be
-// set such that the win from N% correct executions is greater than the loss
-// from (100 - N)% mispredicted executions for the majority of intended targets.
-static cl::opt<int> MinPercentageForPredictableBranch(
-    "min-predictable-branch", cl::init(99),
-    cl::desc("Minimum percentage (0-100) that a condition must be either true "
-             "or false to assume that the condition is predictable"),
-    cl::Hidden);
-
 void TargetLoweringBase::InitLibcalls(const Triple &TT) {
 #define HANDLE_LIBCALL(code, name) \
   setLibcallName(RTLIB::code, name);
@@ -213,7 +201,7 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
     setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
   }
 
-  if (TT.isPS4CPU()) {
+  if (TT.isPS()) {
     setLibcallName(RTLIB::SINCOS_F32, "sincosf");
     setLibcallName(RTLIB::SINCOS_F64, "sincos");
   }
@@ -221,6 +209,35 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
   if (TT.isOSOpenBSD()) {
     setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
   }
+
+  if (TT.isOSWindows() && !TT.isOSCygMing()) {
+    setLibcallName(RTLIB::LDEXP_F32, nullptr);
+    setLibcallName(RTLIB::LDEXP_F80, nullptr);
+    setLibcallName(RTLIB::LDEXP_F128, nullptr);
+    setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);
+
+    setLibcallName(RTLIB::FREXP_F32, nullptr);
+    setLibcallName(RTLIB::FREXP_F80, nullptr);
+    setLibcallName(RTLIB::FREXP_F128, nullptr);
+    setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
+  }
+}
+
+/// GetFPLibCall - Helper to return the right libcall for the given floating
+/// point type, or UNKNOWN_LIBCALL if there is none.
+RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
+                                   RTLIB::Libcall Call_F32,
+                                   RTLIB::Libcall Call_F64,
+                                   RTLIB::Libcall Call_F80,
+                                   RTLIB::Libcall Call_F128,
+                                   RTLIB::Libcall Call_PPCF128) {
+  return
+    VT == MVT::f32 ? Call_F32 :
+    VT == MVT::f64 ? Call_F64 :
+    VT == MVT::f80 ? Call_F80 :
+    VT == MVT::f128 ? Call_F128 :
+    VT == MVT::ppcf128 ? Call_PPCF128 :
+    RTLIB::UNKNOWN_LIBCALL;
 }
 
 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
@@ -231,6 +248,8 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
       return FPEXT_F16_F32;
     if (RetVT == MVT::f64)
       return FPEXT_F16_F64;
+    if (RetVT == MVT::f80)
+      return FPEXT_F16_F80;
     if (RetVT == MVT::f128)
       return FPEXT_F16_F128;
   } else if (OpVT == MVT::f32) {
@@ -267,6 +286,11 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
       return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
+  } else if (RetVT == MVT::bf16) {
+    if (OpVT == MVT::f32)
+      return FPROUND_F32_BF16;
+    if (OpVT == MVT::f64)
+      return FPROUND_F64_BF16;
   } else if (RetVT == MVT::f32) {
     if (OpVT == MVT::f64)
       return FPROUND_F64_F32;
@@ -481,27 +505,43 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
   return UNKNOWN_LIBCALL;
 }
 
-RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
-                                        MVT VT) {
+RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
+  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
+                      POWI_PPCF128);
+}
+
+RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
+  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
+                      LDEXP_PPCF128);
+}
+
+RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
+  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
+                      FREXP_PPCF128);
+}
+
+RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
+                                             AtomicOrdering Order,
+                                             uint64_t MemSize) {
   unsigned ModeN, ModelN;
-  switch (VT.SimpleTy) {
-  case MVT::i8:
+  switch (MemSize) {
+  case 1:
     ModeN = 0;
     break;
-  case MVT::i16:
+  case 2:
     ModeN = 1;
     break;
-  case MVT::i32:
+  case 4:
     ModeN = 2;
     break;
-  case MVT::i64:
+  case 8:
     ModeN = 3;
     break;
-  case MVT::i128:
+  case 16:
     ModeN = 4;
     break;
   default:
-    return UNKNOWN_LIBCALL;
+    return RTLIB::UNKNOWN_LIBCALL;
   }
 
   switch (Order) {
@@ -522,6 +562,15 @@ RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
     return UNKNOWN_LIBCALL;
   }
 
+  return LC[ModeN][ModelN];
+}
+
+RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
+                                        MVT VT) {
+  if (!VT.isScalarInteger())
+    return UNKNOWN_LIBCALL;
+  uint64_t MemSize = VT.getScalarSizeInBits() / 8;
+
 #define LCALLS(A, B) \
   { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
 #define LCALL5(A) \
@@ -529,27 +578,27 @@ RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
   switch (Opc) {
   case ISD::ATOMIC_CMP_SWAP: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   case ISD::ATOMIC_SWAP: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   case ISD::ATOMIC_LOAD_ADD: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   case ISD::ATOMIC_LOAD_OR: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   case ISD::ATOMIC_LOAD_CLR: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   case ISD::ATOMIC_LOAD_XOR: {
     const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
-    return LC[ModeN][ModelN];
+    return getOutlineAtomicHelper(LC, Order, MemSize);
   }
   default:
     return UNKNOWN_LIBCALL;
@@ -649,7 +698,7 @@ RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
 
 /// InitCmpLibcallCCs - Set default comparison libcall CC.
 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
-  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
+  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
   CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
   CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
   CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
@@ -702,9 +751,14 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   SchedPreferenceInfo = Sched::ILP;
   GatherAllAliasesMaxDepth = 18;
   IsStrictFPEnabled = DisableStrictNodeMutation;
-  // TODO: the default will be switched to 0 in the next commit, along
-  // with the Target-specific changes necessary.
-  MaxAtomicSizeInBitsSupported = 1024;
+  MaxBytesForAlignment = 0;
+  MaxAtomicSizeInBitsSupported = 0;
+
+  // Assume that even with libcalls, no target supports wider than 128 bit
+  // division.
+  MaxDivRemBitWidthSupported = 128;
+
+  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;
 
   MinCmpXchgSizeInBits = 0;
   SupportsUnalignedAtomics = false;
@@ -726,6 +780,30 @@ void TargetLoweringBase::initActions() {
   std::fill(std::begin(TargetDAGCombineArray),
             std::end(TargetDAGCombineArray), 0);
 
+  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
+  // remove this and targets should individually set these types if not legal.
+  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
+                                   force_iteration_on_noniterable_enum)) {
+    for (MVT VT : {MVT::i2, MVT::i4})
+      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
+  }
+  for (MVT AVT : MVT::all_valuetypes()) {
+    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
+      setTruncStoreAction(AVT, VT, Expand);
+      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
+      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
+    }
+  }
+  for (unsigned IM = (unsigned)ISD::PRE_INC;
+       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
+    for (MVT VT : {MVT::i2, MVT::i4}) {
+      setIndexedLoadAction(IM, VT, Expand);
+      setIndexedStoreAction(IM, VT, Expand);
+      setIndexedMaskedLoadAction(IM, VT, Expand);
+      setIndexedMaskedStoreAction(IM, VT, Expand);
+    }
+  }
+
   for (MVT VT : MVT::fp_valuetypes()) {
     MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
     if (IntVT.isValid()) {
@@ -749,83 +827,66 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
 
     // These operations default to expand.
-    setOperationAction(ISD::FGETSIGN, VT, Expand);
-    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
-    setOperationAction(ISD::FMINNUM, VT, Expand);
-    setOperationAction(ISD::FMAXNUM, VT, Expand);
-    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
-    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
-    setOperationAction(ISD::FMINIMUM, VT, Expand);
-    setOperationAction(ISD::FMAXIMUM, VT, Expand);
-    setOperationAction(ISD::FMAD, VT, Expand);
-    setOperationAction(ISD::SMIN, VT, Expand);
-    setOperationAction(ISD::SMAX, VT, Expand);
-    setOperationAction(ISD::UMIN, VT, Expand);
-    setOperationAction(ISD::UMAX, VT, Expand);
-    setOperationAction(ISD::ABS, VT, Expand);
-    setOperationAction(ISD::FSHL, VT, Expand);
-    setOperationAction(ISD::FSHR, VT, Expand);
-    setOperationAction(ISD::SADDSAT, VT, Expand);
-    setOperationAction(ISD::UADDSAT, VT, Expand);
-    setOperationAction(ISD::SSUBSAT, VT, Expand);
-    setOperationAction(ISD::USUBSAT, VT, Expand);
-    setOperationAction(ISD::SSHLSAT, VT, Expand);
-    setOperationAction(ISD::USHLSAT, VT, Expand);
-    setOperationAction(ISD::SMULFIX, VT, Expand);
-    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
-    setOperationAction(ISD::UMULFIX, VT, Expand);
-    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
-    setOperationAction(ISD::SDIVFIX, VT, Expand);
-    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
-    setOperationAction(ISD::UDIVFIX, VT, Expand);
-    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);
-    setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
-    setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);
+    setOperationAction({ISD::FGETSIGN, ISD::CONCAT_VECTORS,
+                        ISD::FMINNUM, ISD::FMAXNUM,
+                        ISD::FMINNUM_IEEE, ISD::FMAXNUM_IEEE,
+                        ISD::FMINIMUM, ISD::FMAXIMUM,
+                        ISD::FMAD, ISD::SMIN,
+                        ISD::SMAX, ISD::UMIN,
+                        ISD::UMAX, ISD::ABS,
+                        ISD::FSHL, ISD::FSHR,
+                        ISD::SADDSAT, ISD::UADDSAT,
+                        ISD::SSUBSAT, ISD::USUBSAT,
+                        ISD::SSHLSAT, ISD::USHLSAT,
+                        ISD::SMULFIX, ISD::SMULFIXSAT,
+                        ISD::UMULFIX, ISD::UMULFIXSAT,
+                        ISD::SDIVFIX, ISD::SDIVFIXSAT,
+                        ISD::UDIVFIX, ISD::UDIVFIXSAT,
+                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
+                        ISD::IS_FPCLASS},
+                       VT, Expand);
 
     // Overflow operations default to expand
-    setOperationAction(ISD::SADDO, VT, Expand);
-    setOperationAction(ISD::SSUBO, VT, Expand);
-    setOperationAction(ISD::UADDO, VT, Expand);
-    setOperationAction(ISD::USUBO, VT, Expand);
-    setOperationAction(ISD::SMULO, VT, Expand);
-    setOperationAction(ISD::UMULO, VT, Expand);
-
-    // ADDCARRY operations default to expand
-    setOperationAction(ISD::ADDCARRY, VT, Expand);
-    setOperationAction(ISD::SUBCARRY, VT, Expand);
-    setOperationAction(ISD::SETCCCARRY, VT, Expand);
-    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
-    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);
+    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
+                        ISD::SMULO, ISD::UMULO},
+                       VT, Expand);
+
+    // Carry-using overflow operations default to expand.
+    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
+                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
+                       VT, Expand);
 
     // ADDC/ADDE/SUBC/SUBE default to expand.
-    setOperationAction(ISD::ADDC, VT, Expand);
-    setOperationAction(ISD::ADDE, VT, Expand);
-    setOperationAction(ISD::SUBC, VT, Expand);
-    setOperationAction(ISD::SUBE, VT, Expand);
+    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
+                       Expand);
+
+    // Halving adds
+    setOperationAction(
+        {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,
+        Expand);
+
+    // Absolute difference
+    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);
 
     // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
-    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
+                       Expand);
 
-    setOperationAction(ISD::BITREVERSE, VT, Expand);
-    setOperationAction(ISD::PARITY, VT, Expand);
+    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);
 
     // These library functions default to expand.
-    setOperationAction(ISD::FROUND, VT, Expand);
-    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
-    setOperationAction(ISD::FPOWI, VT, Expand);
+    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
+                       Expand);
 
     // These operations default to expand for vector types.
-    if (VT.isVector()) {
-      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
-      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
-      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
-      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
-      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
-      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
-    }
-
-    // Constrained floating-point operations default to expand.
+    if (VT.isVector())
+      setOperationAction(
+          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
+           ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
+           ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT},
+          VT, Expand);
+
+    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
     setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
 #include "llvm/IR/ConstrainedOps.def"
@@ -834,21 +895,27 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
 
     // Vector reduction default to expand.
-    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
-    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
+    setOperationAction(
+        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
+         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
+         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
+         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
+         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
+         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
+        VT, Expand);
+
+    // Named vector shuffles default to expand.
+    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);
+
+    // VP operations default to expand.
+#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
+    setOperationAction(ISD::SDOPC, VT, Expand);
+#include "llvm/IR/VPIntrinsics.def"
+
+    // FP environment operations default to expand.
+    setOperationAction(ISD::GET_FPENV, VT, Expand);
+    setOperationAction(ISD::SET_FPENV, VT, Expand);
+    setOperationAction(ISD::RESET_FPENV, VT, Expand);
   }
 
   // Most targets ignore the @llvm.prefetch intrinsic.
@@ -860,32 +927,16 @@ void TargetLoweringBase::initActions() {
   // ConstantFP nodes default to expand. Targets can either change this to
   // Legal, in which case all fp constants are legal, or use isFPImmLegal()
   // to optimize expansions for certain constants.
-  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
-  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
-  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
-  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
-  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
+  setOperationAction(ISD::ConstantFP,
+                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
+                     Expand);
 
   // These library functions default to expand.
-  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
-    setOperationAction(ISD::FCBRT, VT, Expand);
-    setOperationAction(ISD::FLOG , VT, Expand);
-    setOperationAction(ISD::FLOG2, VT, Expand);
-    setOperationAction(ISD::FLOG10, VT, Expand);
-    setOperationAction(ISD::FEXP , VT, Expand);
-    setOperationAction(ISD::FEXP2, VT, Expand);
-    setOperationAction(ISD::FFLOOR, VT, Expand);
-    setOperationAction(ISD::FNEARBYINT, VT, Expand);
-    setOperationAction(ISD::FCEIL, VT, Expand);
-    setOperationAction(ISD::FRINT, VT, Expand);
-    setOperationAction(ISD::FTRUNC, VT, Expand);
-    setOperationAction(ISD::FROUND, VT, Expand);
-    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
-    setOperationAction(ISD::LROUND, VT, Expand);
-    setOperationAction(ISD::LLROUND, VT, Expand);
-    setOperationAction(ISD::LRINT, VT, Expand);
-    setOperationAction(ISD::LLRINT, VT, Expand);
-  }
+  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, ISD::FEXP,
+                      ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR, ISD::FNEARBYINT,
+                      ISD::FCEIL, ISD::FRINT, ISD::FTRUNC, ISD::LROUND,
+                      ISD::LLROUND, ISD::LRINT, ISD::LLRINT, ISD::FROUNDEVEN},
+                     {MVT::f32, MVT::f64, MVT::f128}, Expand);
 
   // Default ISD::TRAP to expand (which turns it into abort).
   setOperationAction(ISD::TRAP, MVT::Other, Expand);
@@ -895,6 +946,15 @@ void TargetLoweringBase::initActions() {
 
   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
   setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
+
+  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
+  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);
+
+  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
+    setOperationAction(ISD::GET_FPMODE, VT, Expand);
+    setOperationAction(ISD::SET_FPMODE, VT, Expand);
+  }
+  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
 }
 
 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
@@ -907,8 +967,15 @@ EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
   assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
   if (LHSTy.isVector())
     return LHSTy;
-  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
-                    : getPointerTy(DL);
+  MVT ShiftVT =
+      LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
+  // If any possible shift value won't fit in the preferred type, just use
+  // something safe. Assume it will be legalized when the shift is expanded.
+  if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
+    ShiftVT = MVT::i32;
+  assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
+         "ShiftVT is still too small!");
+  return ShiftVT;
 }
 
 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
@@ -940,7 +1007,7 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
 
   // If this is a simple type, use the ComputeRegisterProp mechanism.
   if (VT.isSimple()) {
     MVT SVT = VT.getSimpleVT();
-    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
+    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
     MVT NVT = TransformToType[SVT.SimpleTy];
     LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
 
@@ -985,9 +1052,6 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
   if (NumElts.isScalar())
     return LegalizeKind(TypeScalarizeVector, EltVT);
 
-  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
-    report_fatal_error("Cannot legalize this vector");
-
   // Try to widen vector elements until the element type is a power of two and
   // promote it to a legal type later on, for example:
   // <3 x i8> -> <4 x i8> -> <4 x i32>
@@ -1005,9 +1069,12 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
 
     // If type is to be expanded, split the vector.
     //  <4 x i140> -> <2 x i140>
-    if (LK.first == TypeExpandInteger)
+    if (LK.first == TypeExpandInteger) {
+      if (VT.getVectorElementCount().isScalable())
+        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
       return LegalizeKind(TypeSplitVector,
                           VT.getHalfNumVectorElementsVT(Context));
+    }
 
     // Promote the integer element types until a legal vector type is found
     // or until the element integer type is too big. If a legal type was not
@@ -1066,6 +1133,9 @@ TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
       return LegalizeKind(TypeWidenVector, NVT);
   }
 
+  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
+    return LegalizeKind(TypeScalarizeScalableVector, EltVT);
+
   // Vectors with illegal element types are expanded.
   EVT NVT = EVT::getVectorVT(Context, EltVT,
                              VT.getVectorElementCount().divideCoefficientBy(2));
@@ -1115,8 +1185,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
   unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();
 
   // Convert sizes such as i33 to i64.
-  if (!isPowerOf2_32(LaneSizeInBits))
-    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);
+  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);
 
   MVT DestVT = TLI->getRegisterType(NewVT);
   RegisterVT = DestVT;
@@ -1132,7 +1201,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
 /// specified register class are all legal.
 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                    const TargetRegisterClass &RC) const {
-  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
+  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
     if (isTypeLegal(*I))
       return true;
   return false;
@@ -1159,7 +1228,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
   // all stack slots), but we need to handle the different type of stackmap
   // operands and memory effects here.
 
-  if (!llvm::any_of(MI->operands(),
+  if (llvm::none_of(MI->operands(),
                     [](MachineOperand &Operand) { return Operand.isFI(); }))
     return MBB;
 
@@ -1263,11 +1332,11 @@ TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
 /// this allows us to compute derived properties we expose.
 void TargetLoweringBase::computeRegisterProperties(
     const TargetRegisterInfo *TRI) {
-  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
+  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                 "Too many value types for ValueTypeActions to hold!");
 
   // Everything defaults to needing one register.
-  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
+  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
     NumRegistersForVT[i] = 1;
     RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
   }
@@ -1329,6 +1398,15 @@ void TargetLoweringBase::computeRegisterProperties(
     ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
   }
 
+  // Decide how to handle f80. If the target does not have native f80 support,
+  // expand it to i96 and we will be generating soft float library calls.
+  if (!isTypeLegal(MVT::f80)) {
+    NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
+    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
+    TransformToType[MVT::f80] = MVT::i32;
+    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
+  }
+
   // Decide how to handle f64. If the target does not have native f64 support,
   // expand it to i64 and we will be generating soft float library calls.
   if (!isTypeLegal(MVT::f64)) {
@@ -1365,6 +1443,16 @@ void TargetLoweringBase::computeRegisterProperties(
     }
   }
 
+  // Decide how to handle bf16. If the target does not have native bf16 support,
+  // promote it to f32, because there are no bf16 library calls (except for
+  // converting from f32 to bf16).
+  if (!isTypeLegal(MVT::bf16)) {
+    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
+    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
+    TransformToType[MVT::bf16] = MVT::f32;
+    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
+  }
+
   // Loop over all of the vector value types to see which need transformations.
   for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
        i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
@@ -1401,7 +1489,7 @@ void TargetLoweringBase::computeRegisterProperties(
       }
       if (IsLegalWiderType)
         break;
-      LLVM_FALLTHROUGH;
+      [[fallthrough]];
     }
 
     case TypeWidenVector:
@@ -1435,7 +1523,7 @@ void TargetLoweringBase::computeRegisterProperties(
           break;
         }
       }
-      LLVM_FALLTHROUGH;
+      [[fallthrough]];
 
     case TypeSplitVector:
     case TypeScalarizeVector: {
@@ -1479,7 +1567,7 @@ void TargetLoweringBase::computeRegisterProperties(
   // not a sub-register class / subreg register class) legal register class for
   // a group of value types. For example, on i386, i8, i16, and i32
   // representative would be GR32; while on x86_64 it's GR64.
-  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
+  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
     const TargetRegisterClass* RRC;
     uint8_t Cost;
     std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
@@ -1506,10 +1594,10 @@ MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
 /// This method returns the number of registers needed, and the VT for each
 /// register. It also returns the VT and quantity of the intermediate values
 /// before they are promoted/expanded.
-unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
-                                                    EVT &IntermediateVT,
-                                                    unsigned &NumIntermediates,
-                                                    MVT &RegisterVT) const {
+unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
+                                                    EVT VT, EVT &IntermediateVT,
+                                                    unsigned &NumIntermediates,
+                                                    MVT &RegisterVT) const {
   ElementCount EltCnt = VT.getVectorElementCount();
 
   // If there is a wider vector type with the same element type as this one,
@@ -1518,7 +1606,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
   // This handles things like <2 x float> -> <4 x float> and
   //  <4 x i1> -> <4 x i32>.
   LegalizeTypeAction TA = getTypeAction(Context, VT);
-  if (EltCnt.getKnownMinValue() != 1 &&
+  if (!EltCnt.isScalar() &&
       (TA == TypeWidenVector || TA == TypePromoteInteger)) {
     EVT RegisterEVT = getTypeToTransformTo(Context, VT);
     if (isTypeLegal(RegisterEVT)) {
@@ -1536,7 +1624,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 
   // Scalable vectors cannot be scalarized, so handle the legalisation of the
   // types like done elsewhere in SelectionDAG.
-  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
+  if (EltCnt.isScalable()) {
     LegalizeKind LK;
     EVT PartVT = VT;
     do {
@@ -1545,16 +1633,14 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
       PartVT = LK.second;
     } while (LK.first != TypeLegal);
 
-    NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
-                       PartVT.getVectorElementCount().getKnownMinValue();
+    if (!PartVT.isVector()) {
+      report_fatal_error(
+          "Don't know how to legalize this scalable vector type");
+    }
 
-    // FIXME: This code needs to be extended to handle more complex vector
-    // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
-    // supported cases are vectors that are broken down into equal parts
-    // such as nxv6i64 -> 3 x nxv2i64.
-    assert((PartVT.getVectorElementCount() * NumIntermediates) ==
-           VT.getVectorElementCount() &&
-           "Expected an integer multiple of PartVT");
+    NumIntermediates =
+        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
+                   PartVT.getVectorElementCount().getKnownMinValue());
     IntermediateVT = PartVT;
     RegisterVT = getRegisterType(Context, IntermediateVT);
     return NumIntermediates;
@@ -1588,7 +1674,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
   if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
     TypeSize NewVTSize = NewVT.getSizeInBits();
     // Convert sizes such as i33 to i64.
-    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
+    if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
       NewVTSize = NewVTSize.coefficientNextPowerOf2();
     return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
   }
@@ -1620,6 +1706,11 @@ bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
          (NumCases * 100 >= Range * MinDensity);
 }
 
+MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
+                                                        EVT ConditionVT) const {
+  return getRegisterType(Context, ConditionVT);
+}
+
 /// Get the EVTs and ArgFlags collections that represent the legalized return
 /// type of the given function. This does not require a DAG or a return value,
 /// and is suitable for use before any DAGs for the function are constructed.
@@ -1637,9 +1728,9 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
     EVT VT = ValueVTs[j];
     ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
 
-    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
+    if (attr.hasRetAttr(Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
-    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
+    else if (attr.hasRetAttr(Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;
 
     // FIXME: C calling convention requires the return type to be promoted to
     // at least 32-bit. But this is not necessary for non-C calling
     // conventions. The frontend should mark functions whose return values
     // require promoting with signext or zeroext attributes.
     if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
-      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+      MVT MinVT = TLI.getRegisterType(MVT::i32);
       if (VT.bitsLT(MinVT))
         VT = MinVT;
     }
@@ -1659,13 +1750,13 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
 
     // 'inreg' on function refers to return value
     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
-    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
+    if (attr.hasRetAttr(Attribute::InReg))
       Flags.setInReg();
 
     // Propagate extension type if any
-    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
+    if (attr.hasRetAttr(Attribute::SExt))
       Flags.setSExt();
-    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
+    else if (attr.hasRetAttr(Attribute::ZExt))
      Flags.setZExt();
 
     for (unsigned i = 0; i < NumParts; ++i)
@@ -1676,35 +1767,34 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area. This is the actual
 /// alignment, not its logarithm.
-unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
+uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                    const DataLayout &DL) const {
   return DL.getABITypeAlign(Ty).value();
 }
 
 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
     LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
-    Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
+    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
   // Check if the specified alignment is sufficient based on the data layout.
   // TODO: While using the data layout works in practice, a better solution
   // would be to implement this check directly (make this a virtual function).
   // For example, the ABI alignment may change based on software platform while
   // this function should only be affected by hardware implementation.
   Type *Ty = VT.getTypeForEVT(Context);
-  if (Alignment >= DL.getABITypeAlign(Ty)) {
+  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
     // Assume that an access that meets the ABI-specified alignment is fast.
     if (Fast != nullptr)
-      *Fast = true;
+      *Fast = 1;
     return true;
   }
 
   // This is a misaligned access.
-  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment.value(), Flags,
-                                        Fast);
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
 }
 
 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
     LLVMContext &Context, const DataLayout &DL, EVT VT,
-    const MachineMemOperand &MMO, bool *Fast) const {
+    const MachineMemOperand &MMO, unsigned *Fast) const {
   return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                         MMO.getAlign(), MMO.getFlags(), Fast);
 }
@@ -1713,7 +1803,7 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                             const DataLayout &DL, EVT VT,
                                             unsigned AddrSpace, Align Alignment,
                                             MachineMemOperand::Flags Flags,
-                                            bool *Fast) const {
+                                            unsigned *Fast) const {
   return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                         Flags, Fast);
 }
@@ -1721,7 +1811,7 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                             const DataLayout &DL, EVT VT,
                                             const MachineMemOperand &MMO,
-                                            bool *Fast) const {
+                                            unsigned *Fast) const {
   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
                             MMO.getAlign(), MMO.getFlags(), Fast);
 }
@@ -1729,13 +1819,10 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                             const DataLayout &DL, LLT Ty,
                                             const MachineMemOperand &MMO,
-                                            bool *Fast) const {
-  return allowsMemoryAccess(Context, DL, getMVTForLLT(Ty), MMO.getAddrSpace(),
-                            MMO.getAlign(), MMO.getFlags(), Fast);
-}
-
-BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
-  return BranchProbability(MinPercentageForPredictableBranch, 100);
+                                            unsigned *Fast) const {
+  EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
+  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
+                            MMO.getFlags(), Fast);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1821,36 +1908,9 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
   llvm_unreachable("Unknown instruction type encountered!");
 }
 
-std::pair<int, MVT>
-TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
-                                            Type *Ty) const {
-  LLVMContext &C = Ty->getContext();
-  EVT MTy = getValueType(DL, Ty);
-
-  int Cost = 1;
-  // We keep legalizing the type until we find a legal kind. We assume that
-  // the only operation that costs anything is the split. After splitting
-  // we need to handle two types.
-  while (true) {
-    LegalizeKind LK = getTypeConversion(C, MTy);
-
-    if (LK.first == TypeLegal)
-      return std::make_pair(Cost, MTy.getSimpleVT());
-
-    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
-      Cost *= 2;
-
-    // Do not loop with f128 type.
-    if (MTy == LK.second)
-      return std::make_pair(Cost, MTy.getSimpleVT());
-
-    // Keep legalizing the type.
-    MTy = LK.second;
-  }
-}
-
-Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
-                                                              bool UseTLS) const {
+Value *
+TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
+                                                       bool UseTLS) const {
   // compiler-rt provides a variable with a magic name. Targets that do not
   // link with compiler-rt may also provide such a variable.
   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
@@ -1858,7 +1918,7 @@ Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
   auto UnsafeStackPtr =
       dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
 
-  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
+  Type *StackPtrTy = PointerType::getUnqual(M->getContext());
 
   if (!UnsafeStackPtr) {
     auto TLSModel = UseTLS ?
@@ -1881,16 +1941,17 @@ Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
   return UnsafeStackPtr;
 }
 
-Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
+Value *
+TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
   if (!TM.getTargetTriple().isAndroid())
     return getDefaultSafeStackPointerLocation(IRB, true);
 
   // Android provides a libc function to retrieve the address of the current
   // thread's unsafe stack pointer.
   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
-  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
-  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
-                                             StackPtrTy->getPointerTo(0));
+  auto *PtrTy = PointerType::getUnqual(M->getContext());
+  FunctionCallee Fn =
+      M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
   return IRB.CreateCall(Fn);
 }
 
@@ -1941,10 +2002,10 @@ bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
 
 // For OpenBSD return its special guard variable. Otherwise return nullptr,
 // so that SelectionDAG handle SSP.
-Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
+Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
   if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
     Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
-    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
+    PointerType *PtrTy = PointerType::getUnqual(M.getContext());
     Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
     if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
       G->setVisibility(GlobalValue::HiddenVisibility);
@@ -1957,11 +2018,16 @@ Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
 // TODO: add LOAD_STACK_GUARD support.
 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
   if (!M.getNamedValue("__stack_chk_guard")) {
-    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
-                                  GlobalVariable::ExternalLinkage, nullptr,
-                                  "__stack_chk_guard");
-    if (TM.getRelocationModel() == Reloc::Static &&
-        !TM.getTargetTriple().isWindowsGNUEnvironment())
+    auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
+                                  false, GlobalVariable::ExternalLinkage,
+                                  nullptr, "__stack_chk_guard");
+
+    // FreeBSD has "__stack_chk_guard" defined externally in libc.so
+    if (M.getDirectAccessExternalData() &&
+        !TM.getTargetTriple().isWindowsGNUEnvironment() &&
+        !(TM.getTargetTriple().isPPC64() && TM.getTargetTriple().isOSFreeBSD()) &&
+        (!TM.getTargetTriple().isOSDarwin() ||
+         TM.getRelocationModel() == Reloc::Static))
       GV->setDSOLocal(true);
   }
 }
@@ -2000,6 +2066,17 @@ bool TargetLoweringBase::isJumpTableRelative() const {
   return getTargetMachine().isPositionIndependent();
 }
 
+Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
+  if (TM.Options.LoopAlignment)
+    return Align(TM.Options.LoopAlignment);
+  return PrefLoopAlignment;
+}
+
+unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
+    MachineBasicBlock *MBB) const {
+  return MaxBytesForAlignment;
+}
+
 //===----------------------------------------------------------------------===//
 //  Reciprocal Estimates
 //===----------------------------------------------------------------------===//
@@ -2020,9 +2097,11 @@ static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
 
   Name += IsSqrt ? "sqrt" : "div";
 
-  // TODO: Handle "half" or other float types?
+  // TODO: Handle other float types?
   if (VT.getScalarType() == MVT::f64) {
     Name += "d";
+  } else if (VT.getScalarType() == MVT::f16) {
+    Name += "h";
   } else {
     assert(VT.getScalarType() == MVT::f32 &&
            "Unexpected FP type for reciprocal estimate");
@@ -2184,13 +2263,41 @@ int TargetLoweringBase::getDivRefinementSteps(EVT VT,
   return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
 }
 
+bool TargetLoweringBase::isLoadBitCastBeneficial(
+    EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
+    const MachineMemOperand &MMO) const {
+  // Single-element vectors are scalarized, so we should generally avoid having
+  // any memory operations on such types, as they would get scalarized too.
+  if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
+      BitcastVT.getVectorNumElements() == 1)
+    return false;
+
+  // Don't do if we could do an indexed load on the original type, but not on
+  // the new one.
+  if (!LoadVT.isSimple() || !BitcastVT.isSimple())
+    return true;
+
+  MVT LoadMVT = LoadVT.getSimpleVT();
+
+  // Don't bother doing this if it's just going to be promoted again later, as
+  // doing so might interfere with other combines.
+  if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
+      getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
+    return false;
+
+  unsigned Fast = 0;
+  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
+                            MMO, &Fast) &&
+         Fast;
+}
+
 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
   MF.getRegInfo().freezeReservedRegs(MF);
 }
 
-MachineMemOperand::Flags
-TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
-                                           const DataLayout &DL) const {
+MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
+    const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
+    const TargetLibraryInfo *LibInfo) const {
   MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
   if (LI.isVolatile())
     Flags |= MachineMemOperand::MOVolatile;
@@ -2201,7 +2308,9 @@ TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
   if (LI.hasMetadata(LLVMContext::MD_invariant_load))
     Flags |= MachineMemOperand::MOInvariant;
 
-  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
+  if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
+                                         LI.getAlign(), DL, &LI, AC,
+                                         /*DT=*/nullptr, LibInfo))
     Flags |= MachineMemOperand::MODereferenceable;
 
   Flags |= getTargetMMOFlags(LI);
@@ -2243,6 +2352,24 @@ TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
   return Flags;
 }
 
+Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
+                                                  Instruction *Inst,
+                                                  AtomicOrdering Ord) const {
+  if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
+    return Builder.CreateFence(Ord);
+  else
+    return nullptr;
+}
+
+Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
+                                                   Instruction *Inst,
+                                                   AtomicOrdering Ord) const {
+  if (isAcquireOrStronger(Ord))
+    return Builder.CreateFence(Ord);
+  else
+    return nullptr;
+}
+
 //===----------------------------------------------------------------------===//
 //  GlobalISel Hooks
 //===----------------------------------------------------------------------===//
@@ -2260,7 +2387,7 @@ bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
   auto maxUses = [](unsigned RematCost) {
     // A cost of 1 means remats are basically free.
     if (RematCost == 1)
-      return UINT_MAX;
+      return std::numeric_limits<unsigned>::max();
     if (RematCost == 2)
       return 2U;
 
@@ -2270,18 +2397,6 @@ bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
     llvm_unreachable("Unexpected remat cost");
   };
 
-  // Helper to walk through uses and terminate if we've reached a limit. Saves
-  // us spending time traversing uses if all we want to know is if it's >= min.
-  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
-    unsigned NumUses = 0;
-    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
-    for (; UI != UE && NumUses < MaxUses; ++UI) {
-      NumUses++;
-    }
-    // If we haven't reached the end yet then there are more than MaxUses users.
-    return UI == UE;
-  };
-
   switch (MI.getOpcode()) {
   default:
     return false;
@@ -2298,8 +2413,7 @@ bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
     unsigned MaxUses = maxUses(RematCost);
     if (MaxUses == UINT_MAX)
       return true; // Remats are "free" so always localize.
-    bool B = isUsesAtMost(Reg, MaxUses);
-    return B;
+    return MRI.hasAtMostUserInstrs(Reg, MaxUses);
   }
   }
 }