Diffstat (limited to 'include/llvm/Target')
-rw-r--r--  include/llvm/Target/Mangler.h                  |   9
-rw-r--r--  include/llvm/Target/Target.td                  | 104
-rw-r--r--  include/llvm/Target/TargetCallingConv.h        |  27
-rw-r--r--  include/llvm/Target/TargetData.h               | 363
-rw-r--r--  include/llvm/Target/TargetELFWriterInfo.h      | 121
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h          |  37
-rw-r--r--  include/llvm/Target/TargetIntrinsicInfo.h      |   5
-rw-r--r--  include/llvm/Target/TargetLibraryInfo.h        | 103
-rw-r--r--  include/llvm/Target/TargetLowering.h           | 131
-rw-r--r--  include/llvm/Target/TargetLoweringObjectFile.h |   9
-rw-r--r--  include/llvm/Target/TargetMachine.h            |  20
-rw-r--r--  include/llvm/Target/TargetOpcodes.h            |   6
-rw-r--r--  include/llvm/Target/TargetOptions.h            |   4
-rw-r--r--  include/llvm/Target/TargetRegisterInfo.h       |  62
-rw-r--r--  include/llvm/Target/TargetSchedule.td          | 340
-rw-r--r--  include/llvm/Target/TargetSelectionDAG.td      |   4
-rw-r--r--  include/llvm/Target/TargetSelectionDAGInfo.h   |  10
-rw-r--r--  include/llvm/Target/TargetSubtargetInfo.h      |  23
-rw-r--r--  include/llvm/Target/TargetTransformImpl.h      |  98
19 files changed, 858 insertions, 618 deletions
diff --git a/include/llvm/Target/Mangler.h b/include/llvm/Target/Mangler.h
index d5e165e58b91..a50f54a436e9 100644
--- a/include/llvm/Target/Mangler.h
+++ b/include/llvm/Target/Mangler.h
@@ -22,7 +22,7 @@ class GlobalValue;
template <typename T> class SmallVectorImpl;
class MCContext;
class MCSymbol;
-class TargetData;
+class DataLayout;
class Mangler {
public:
@@ -34,7 +34,7 @@ public:
private:
MCContext &Context;
- const TargetData &TD;
+ const DataLayout &TD;
/// AnonGlobalIDs - We need to give global values the same name every time
/// they are mangled. This keeps track of the number we give to anonymous
@@ -47,20 +47,19 @@ private:
unsigned NextAnonGlobalID;
public:
- Mangler(MCContext &context, const TargetData &td)
+ Mangler(MCContext &context, const DataLayout &td)
: Context(context), TD(td), NextAnonGlobalID(1) {}
/// getSymbol - Return the MCSymbol for the specified global value. This
/// symbol is the main label that is the address of the global.
MCSymbol *getSymbol(const GlobalValue *GV);
-
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified global variable's name. If the global variable doesn't
/// have a name, this fills in a unique name for the global.
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool isImplicitlyPrivate);
-
+
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified name as the global variable name. GVName must not be
/// empty.
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index 1816445579ed..12f5c0eb306a 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -343,8 +343,8 @@ class Instruction {
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
- bit mayLoad = 0; // Is it possible for this inst to read memory?
- bit mayStore = 0; // Is it possible for this inst to write memory?
+ bit mayLoad = ?; // Is it possible for this inst to read memory?
+ bit mayStore = ?; // Is it possible for this inst to write memory?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -369,7 +369,7 @@ class Instruction {
//
// neverHasSideEffects - Set on an instruction with no pattern if it has no
// side effects.
- bit hasSideEffects = 0;
+ bit hasSideEffects = ?;
bit neverHasSideEffects = 0;
// Is this instruction a "real" instruction (with a distinct machine
@@ -495,7 +495,8 @@ def ptr_rc : PointerLikeRegClass<0>;
/// unknown definition - Mark this operand as being of unknown type, causing
/// it to be resolved by inference in the context it is used.
-def unknown;
+class unknown_class;
+def unknown : unknown_class;
/// AsmOperandClass - Representation for the kinds of operands which the target
/// specific parser can create and the assembly matcher may need to distinguish.
@@ -602,23 +603,31 @@ def f64imm : Operand<f64>;
///
def zero_reg;
+/// OperandWithDefaultOps - This Operand class can be used as the parent class
+/// for an Operand that needs to be initialized with a default value if
+/// no value is supplied in a pattern. This class can be used to simplify the
+/// pattern definitions for instructions that have target specific flags
+/// encoded as immediate operands.
+class OperandWithDefaultOps<ValueType ty, dag defaultops>
+ : Operand<ty> {
+ dag DefaultOps = defaultops;
+}
+
/// PredicateOperand - This can be used to define a predicate operand for an
/// instruction. OpTypes specifies the MIOperandInfo for the operand, and
/// AlwaysVal specifies the value of this predicate when set to "always
/// execute".
class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, AlwaysVal> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = AlwaysVal;
}
/// OptionalDefOperand - This is used to define an optional definition operand
/// for an instruction. DefaultOps is the register the operand represents if
/// none is supplied, e.g. zero_reg.
class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, defaultops> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = defaultops;
}
@@ -631,6 +640,17 @@ class InstrInfo {
// Sparc manual specifies its instructions in the format [31..0] (big), while
// PowerPC specifies them using the format [0..31] (little).
bit isLittleEndianEncoding = 0;
+
+ // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
+ // by default, and TableGen will infer their value from the instruction
+ // pattern when possible.
+ //
+ // Normally, TableGen will issue an error if it can't infer the value of a
+ // property that hasn't been set explicitly. When guessInstructionProperties
+ // is set, it will guess a safe value instead.
+ //
+ // This option is a temporary migration help. It will go away.
+ bit guessInstructionProperties = 1;
}
// Standard Pseudo Instructions.
@@ -734,6 +754,18 @@ def BUNDLE : Instruction {
let InOperandList = (ins variable_ops);
let AsmString = "BUNDLE";
}
+def LIFETIME_START : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_START";
+ let neverHasSideEffects = 1;
+}
+def LIFETIME_END : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_END";
+ let neverHasSideEffects = 1;
+}
}
//===----------------------------------------------------------------------===//
@@ -753,6 +785,10 @@ class AsmParser {
// function of the AsmParser class to call on every matched instruction.
// This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
+
+ // ShouldEmitMatchRegisterName - Set to false if the target needs a
+ // hand-written register name matcher.
+ bit ShouldEmitMatchRegisterName = 1;
}
def DefaultAsmParser : AsmParser;
@@ -953,12 +989,64 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
// ProcessorModel allows subtargets to specify the more general
// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
// gradually move to this newer form.
+//
+// Although this class always passes NoItineraries to the Processor
+// class, the SchedMachineModel may still define valid Itineraries.
class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
: Processor<n, NoItineraries, f> {
let SchedModel = m;
}
//===----------------------------------------------------------------------===//
+// InstrMapping - This class is used to create mapping tables to relate
+// instructions with each other based on the values specified in RowFields,
+// ColFields, KeyCol and ValueCols.
+//
+class InstrMapping {
+ // FilterClass - Used to limit search space only to the instructions that
+ // define the relationship modeled by this InstrMapping record.
+ string FilterClass;
+
+ // RowFields - List of fields/attributes that should be same for all the
+ // instructions in a row of the relation table. Think of this as a set of
+ // properties shared by all the instructions related by this relationship
+ // model and is used to categorize instructions into subgroups. For instance,
+ // if we want to define a relation that maps 'Add' instruction to its
+ // predicated forms, we can define RowFields like this:
+ //
+ // let RowFields = BaseOp
+ // All add instructions, predicated or non-predicated, will have to set BaseOp
+ // to the same value.
+ //
+ // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
+ // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
+ // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false' }
+ list<string> RowFields = [];
+
+ // List of fields/attributes that are the same for all the instructions
+ // in a column of the relation table.
+ // Ex: let ColFields = 'predSense' -- It means that the columns are arranged
+ // based on the 'predSense' values. All the instructions in a specific
+ // column have the same value and it is fixed for the column according
+ // to the values set in 'ValueCols'.
+ list<string> ColFields = [];
+
+ // Values for the fields/attributes listed in 'ColFields'.
+ // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction
+ // that models this relation) should be non-predicated.
+ // In the example above, 'Add' is the key instruction.
+ list<string> KeyCol = [];
+
+ // List of values for the fields/attributes listed in 'ColFields', one for
+ // each column in the relation table.
+ //
+ // Ex: let ValueCols = [['true'],['false']] -- It adds two columns in the
+ // table. First column requires all the instructions to have predSense
+ // set to 'true' and second column requires it to be 'false'.
+ list<list<string> > ValueCols = [];
+}
+
+//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//
include "llvm/Target/TargetCallingConv.td"
diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h
index f8cebefb0eae..2160e371bda9 100644
--- a/include/llvm/Target/TargetCallingConv.h
+++ b/include/llvm/Target/TargetCallingConv.h
@@ -113,9 +113,18 @@ namespace ISD {
MVT VT;
bool Used;
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+
+ /// Offset in bytes of the current input value relative to the beginning of
+ /// the original argument. E.g. if the argument was split into four 32-bit
+ /// registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), Used(used) {
+ InputArg(ArgFlagsTy flags, EVT vt, bool used,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
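For illustration only, here is a standalone sketch (plain C++, not the LLVM types; the helper name splitIntoWords is hypothetical) of the bookkeeping described above: an argument split into 32-bit registers yields one entry per part, all sharing the original argument index and carrying byte offsets 0, 4, 8, and so on.

    #include <vector>

    // Simplified stand-ins for the OrigArgIndex/PartOffset fields added above.
    struct MockPart {
      unsigned OrigArgIndex; // which IR argument this part came from
      unsigned PartOffset;   // byte offset of this part within that argument
    };

    // Split a 'bytes'-sized argument into 32-bit (4-byte) parts.
    std::vector<MockPart> splitIntoWords(unsigned origIdx, unsigned bytes) {
      std::vector<MockPart> parts;
      for (unsigned off = 0; off < bytes; off += 4)
        parts.push_back({origIdx, off});
      return parts; // for bytes == 16: PartOffsets 0, 4, 8, 12
    }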
@@ -131,9 +140,19 @@ namespace ISD {
/// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
bool IsFixed;
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+
+ /// Offset in bytes of the current output value relative to the beginning of
+ /// the original argument. E.g. if the argument was split into four 32-bit
+ /// registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
OutputArg() : IsFixed(false) {}
- OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
- : Flags(flags), IsFixed(isfixed) {
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
deleted file mode 100644
index 4f94ab751cb6..000000000000
--- a/include/llvm/Target/TargetData.h
+++ /dev/null
@@ -1,363 +0,0 @@
-//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines target properties related to datatype size/offset/alignment
-// information. It uses lazy annotations to cache information about how
-// structure types are laid out and used.
-//
-// This structure should be created once, filled in if the defaults are not
-// correct and then passed around by const&. None of the members functions
-// require modification to the object.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETDATA_H
-#define LLVM_TARGET_TARGETDATA_H
-
-#include "llvm/Pass.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-class Value;
-class Type;
-class IntegerType;
-class StructType;
-class StructLayout;
-class GlobalVariable;
-class LLVMContext;
-template<typename T>
-class ArrayRef;
-
-/// Enum used to categorize the alignment types stored by TargetAlignElem
-enum AlignTypeEnum {
- INTEGER_ALIGN = 'i', ///< Integer type alignment
- VECTOR_ALIGN = 'v', ///< Vector type alignment
- FLOAT_ALIGN = 'f', ///< Floating point type alignment
- AGGREGATE_ALIGN = 'a', ///< Aggregate alignment
- STACK_ALIGN = 's' ///< Stack objects alignment
-};
-
-/// Target alignment element.
-///
-/// Stores the alignment data associated with a given alignment type (pointer,
-/// integer, vector, float) and type bit width.
-///
-/// @note The unusual order of elements in the structure attempts to reduce
-/// padding and make the structure slightly more cache friendly.
-struct TargetAlignElem {
- AlignTypeEnum AlignType : 8; ///< Alignment type (AlignTypeEnum)
- unsigned ABIAlign; ///< ABI alignment for this type/bitw
- unsigned PrefAlign; ///< Pref. alignment for this type/bitw
- uint32_t TypeBitWidth; ///< Type bit width
-
- /// Initializer
- static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
- /// Equality predicate
- bool operator==(const TargetAlignElem &rhs) const;
-};
-
-/// TargetData - This class holds a parsed version of the target data layout
-/// string in a module and provides methods for querying it. The target data
-/// layout string is specified *by the target* - a frontend generating LLVM IR
-/// is required to generate the right target data for the target being codegen'd
-/// to. If some measure of portability is desired, an empty string may be
-/// specified in the module.
-class TargetData : public ImmutablePass {
-private:
- bool LittleEndian; ///< Defaults to false
- unsigned PointerMemSize; ///< Pointer size in bytes
- unsigned PointerABIAlign; ///< Pointer ABI alignment
- unsigned PointerPrefAlign; ///< Pointer preferred alignment
- unsigned StackNaturalAlign; ///< Stack natural alignment
-
- SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
-
- /// Alignments- Where the primitive type alignment data is stored.
- ///
- /// @sa init().
- /// @note Could support multiple size pointer alignments, e.g., 32-bit
- /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
- /// we don't.
- SmallVector<TargetAlignElem, 16> Alignments;
-
- /// InvalidAlignmentElem - This member is a signal that a requested alignment
- /// type and bit width were not found in the SmallVector.
- static const TargetAlignElem InvalidAlignmentElem;
-
- // The StructType -> StructLayout map.
- mutable void *LayoutMap;
-
- //! Set/initialize target alignments
- void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
- unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, Type *Ty) const;
- //! Internal helper method that returns requested alignment for type.
- unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
-
- /// Valid alignment predicate.
- ///
- /// Predicate that tests a TargetAlignElem reference returned by get() against
- /// InvalidAlignmentElem.
- bool validAlignment(const TargetAlignElem &align) const {
- return &align != &InvalidAlignmentElem;
- }
-
- /// Initialise a TargetData object with default values, ensure that the
- /// target data pass is registered.
- void init();
-
-public:
- /// Default ctor.
- ///
- /// @note This has to exist, because this is a pass, but it should never be
- /// used.
- TargetData();
-
- /// Constructs a TargetData from a specification string. See init().
- explicit TargetData(StringRef TargetDescription)
- : ImmutablePass(ID) {
- std::string errMsg = parseSpecifier(TargetDescription, this);
- assert(errMsg == "" && "Invalid target data layout string.");
- (void)errMsg;
- }
-
- /// Parses a target data specification string. Returns an error message
- /// if the string is malformed, or the empty string on success. Optionally
- /// initialises a TargetData object if passed a non-null pointer.
- static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0);
-
- /// Initialize target data from properties stored in the module.
- explicit TargetData(const Module *M);
-
- TargetData(const TargetData &TD) :
- ImmutablePass(ID),
- LittleEndian(TD.isLittleEndian()),
- PointerMemSize(TD.PointerMemSize),
- PointerABIAlign(TD.PointerABIAlign),
- PointerPrefAlign(TD.PointerPrefAlign),
- LegalIntWidths(TD.LegalIntWidths),
- Alignments(TD.Alignments),
- LayoutMap(0)
- { }
-
- ~TargetData(); // Not virtual, do not subclass this class
-
- /// Target endianness...
- bool isLittleEndian() const { return LittleEndian; }
- bool isBigEndian() const { return !LittleEndian; }
-
- /// getStringRepresentation - Return the string representation of the
- /// TargetData. This representation is in the same format accepted by the
- /// string constructor above.
- std::string getStringRepresentation() const;
-
- /// isLegalInteger - This function returns true if the specified type is
- /// known to be a native integer type supported by the CPU. For example,
- /// i64 is not native on most 32-bit CPUs and i37 is not native on any known
- /// one. This returns false if the integer width is not legal.
- ///
- /// The width is specified in bits.
- ///
- bool isLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (LegalIntWidths[i] == Width)
- return true;
- return false;
- }
-
- bool isIllegalInteger(unsigned Width) const {
- return !isLegalInteger(Width);
- }
-
- /// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(unsigned Align) const {
- return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
- }
-
- /// fitsInLegalInteger - This function returns true if the specified type fits
- /// in a native integer type supported by the CPU. For example, if the CPU
- /// only supports i32 as a native integer type, then i27 fits in a legal
- // integer type but i45 does not.
- bool fitsInLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (Width <= LegalIntWidths[i])
- return true;
- return false;
- }
-
- /// Target pointer alignment
- unsigned getPointerABIAlignment() const { return PointerABIAlign; }
- /// Return target's alignment for stack-based pointers
- unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
- /// Target pointer size
- unsigned getPointerSize() const { return PointerMemSize; }
- /// Target pointer size, in bits
- unsigned getPointerSizeInBits() const { return 8*PointerMemSize; }
-
- /// Size examples:
- ///
- /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
- /// ---- ---------- --------------- ---------------
- /// i1 1 8 8
- /// i8 8 8 8
- /// i19 19 24 32
- /// i32 32 32 32
- /// i100 100 104 128
- /// i128 128 128 128
- /// Float 32 32 32
- /// Double 64 64 64
- /// X86_FP80 80 80 96
- ///
- /// [*] The alloc size depends on the alignment, and thus on the target.
- /// These values are for x86-32 linux.
-
- /// getTypeSizeInBits - Return the number of bits necessary to hold the
- /// specified type. For example, returns 36 for i36 and 80 for x86_fp80.
- uint64_t getTypeSizeInBits(Type* Ty) const;
-
- /// getTypeStoreSize - Return the maximum number of bytes that may be
- /// overwritten by storing the specified type. For example, returns 5
- /// for i36 and 10 for x86_fp80.
- uint64_t getTypeStoreSize(Type *Ty) const {
- return (getTypeSizeInBits(Ty)+7)/8;
- }
-
- /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
- /// overwritten by storing the specified type; always a multiple of 8. For
- /// example, returns 40 for i36 and 80 for x86_fp80.
- uint64_t getTypeStoreSizeInBits(Type *Ty) const {
- return 8*getTypeStoreSize(Ty);
- }
-
- /// getTypeAllocSize - Return the offset in bytes between successive objects
- /// of the specified type, including alignment padding. This is the amount
- /// that alloca reserves for this type. For example, returns 12 or 16 for
- /// x86_fp80, depending on alignment.
- uint64_t getTypeAllocSize(Type* Ty) const {
- // Round up to the next alignment boundary.
- return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
- }
-
- /// getTypeAllocSizeInBits - Return the offset in bits between successive
- /// objects of the specified type, including alignment padding; always a
- /// multiple of 8. This is the amount that alloca reserves for this type.
- /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
- uint64_t getTypeAllocSizeInBits(Type* Ty) const {
- return 8*getTypeAllocSize(Ty);
- }
-
- /// getABITypeAlignment - Return the minimum ABI-required alignment for the
- /// specified type.
- unsigned getABITypeAlignment(Type *Ty) const;
-
- /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
- /// an integer type of the specified bitwidth.
- unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
-
-
- /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
- /// for the specified type when it is part of a call frame.
- unsigned getCallFrameTypeAlignment(Type *Ty) const;
-
-
- /// getPrefTypeAlignment - Return the preferred stack/global alignment for
- /// the specified type. This is always at least as good as the ABI alignment.
- unsigned getPrefTypeAlignment(Type *Ty) const;
-
- /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
- /// specified type, returned as log2 of the value (a shift amount).
- ///
- unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
-
- /// getIntPtrType - Return an unsigned integer type that is the same size or
- /// greater to the host pointer size.
- ///
- IntegerType *getIntPtrType(LLVMContext &C) const;
-
- /// getIndexedOffset - return the offset from the beginning of the type for
- /// the specified indices. This is used to implement getelementptr.
- ///
- uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;
-
- /// getStructLayout - Return a StructLayout object, indicating the alignment
- /// of the struct, its size, and the offsets of its fields. Note that this
- /// information is lazily cached.
- const StructLayout *getStructLayout(StructType *Ty) const;
-
- /// getPreferredAlignment - Return the preferred alignment of the specified
- /// global. This includes an explicitly requested alignment (if the global
- /// has one).
- unsigned getPreferredAlignment(const GlobalVariable *GV) const;
-
- /// getPreferredAlignmentLog - Return the preferred alignment of the
- /// specified global, returned in log form. This includes an explicitly
- /// requested alignment (if the global has one).
- unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
-
- /// RoundUpAlignment - Round the specified value up to the next alignment
- /// boundary specified by Alignment. For example, 7 rounded up to an
- /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4
- /// is 8 because it is already aligned.
- template <typename UIntTy>
- static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) {
- assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
- return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
- }
-
- static char ID; // Pass identification, replacement for typeid
-};
-
-/// StructLayout - used to lazily calculate structure layout information for a
-/// target machine, based on the TargetData structure.
-///
-class StructLayout {
- uint64_t StructSize;
- unsigned StructAlignment;
- unsigned NumElements;
- uint64_t MemberOffsets[1]; // variable sized array!
-public:
-
- uint64_t getSizeInBytes() const {
- return StructSize;
- }
-
- uint64_t getSizeInBits() const {
- return 8*StructSize;
- }
-
- unsigned getAlignment() const {
- return StructAlignment;
- }
-
- /// getElementContainingOffset - Given a valid byte offset into the structure,
- /// return the structure index that contains it.
- ///
- unsigned getElementContainingOffset(uint64_t Offset) const;
-
- uint64_t getElementOffset(unsigned Idx) const {
- assert(Idx < NumElements && "Invalid element idx!");
- return MemberOffsets[Idx];
- }
-
- uint64_t getElementOffsetInBits(unsigned Idx) const {
- return getElementOffset(Idx)*8;
- }
-
-private:
- friend class TargetData; // Only TargetData can create this class
- StructLayout(StructType *ST, const TargetData &TD);
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h
deleted file mode 100644
index 5e48629cf4d6..000000000000
--- a/include/llvm/Target/TargetELFWriterInfo.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TargetELFWriterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H
-#define LLVM_TARGET_TARGETELFWRITERINFO_H
-
-namespace llvm {
-
- //===--------------------------------------------------------------------===//
- // TargetELFWriterInfo
- //===--------------------------------------------------------------------===//
-
- class TargetELFWriterInfo {
- protected:
- // EMachine - This field is the target specific value to emit as the
- // e_machine member of the ELF header.
- unsigned short EMachine;
- bool is64Bit, isLittleEndian;
- public:
-
- // Machine architectures
- enum MachineType {
- EM_NONE = 0, // No machine
- EM_M32 = 1, // AT&T WE 32100
- EM_SPARC = 2, // SPARC
- EM_386 = 3, // Intel 386
- EM_68K = 4, // Motorola 68000
- EM_88K = 5, // Motorola 88000
- EM_486 = 6, // Intel 486 (deprecated)
- EM_860 = 7, // Intel 80860
- EM_MIPS = 8, // MIPS R3000
- EM_PPC = 20, // PowerPC
- EM_ARM = 40, // ARM
- EM_ALPHA = 41, // DEC Alpha
- EM_SPARCV9 = 43, // SPARC V9
- EM_X86_64 = 62, // AMD64
- EM_HEXAGON = 164 // Qualcomm Hexagon
- };
-
- // ELF File classes
- enum {
- ELFCLASS32 = 1, // 32-bit object file
- ELFCLASS64 = 2 // 64-bit object file
- };
-
- // ELF Endianess
- enum {
- ELFDATA2LSB = 1, // Little-endian object file
- ELFDATA2MSB = 2 // Big-endian object file
- };
-
- explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
- virtual ~TargetELFWriterInfo();
-
- unsigned short getEMachine() const { return EMachine; }
- unsigned getEFlags() const { return 0; }
- unsigned getEIClass() const { return is64Bit ? ELFCLASS64 : ELFCLASS32; }
- unsigned getEIData() const {
- return isLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
- }
-
- /// ELF Header and ELF Section Header Info
- unsigned getHdrSize() const { return is64Bit ? 64 : 52; }
- unsigned getSHdrSize() const { return is64Bit ? 64 : 40; }
-
- /// Symbol Table Info
- unsigned getSymTabEntrySize() const { return is64Bit ? 24 : 16; }
-
- /// getPrefELFAlignment - Returns the preferred alignment for ELF. This
- /// is used to align some sections.
- unsigned getPrefELFAlignment() const { return is64Bit ? 8 : 4; }
-
- /// getRelocationEntrySize - Entry size used in the relocation section
- unsigned getRelocationEntrySize() const {
- return is64Bit ? (hasRelocationAddend() ? 24 : 16)
- : (hasRelocationAddend() ? 12 : 8);
- }
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const = 0;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const = 0;
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const = 0;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const = 0;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const = 0;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const = 0;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const = 0;
- };
-
-} // end llvm namespace
-
-#endif // LLVM_TARGET_TARGETELFWRITERINFO_H
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index da30ab82d6c2..4570813ba6c2 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -45,8 +45,8 @@ template<class T> class SmallVectorImpl;
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
- TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT
+ TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
public:
TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
: CallFrameSetupOpcode(CFSetupOpcode),
@@ -459,6 +459,13 @@ public:
}
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ ///
+ /// This function should support copies within any legal register class as
+ /// well as any cross-class copies created during instruction selection.
+ ///
+ /// The source and destination registers may overlap, which may require a
+ /// careful implementation when multiple copy instructions are required for
+ /// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -794,29 +801,6 @@ public:
const MachineInstr *UseMI, unsigned UseIdx,
bool FindMin = false) const;
- /// computeOperandLatency - Compute and return the latency of the given data
- /// dependent def and use. DefMI must be a valid def. UseMI may be NULL for
- /// an unknown use. If the subtarget allows, this may or may not need to call
- /// getOperandLatency().
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- unsigned computeOperandLatency(const InstrItineraryData *ItinData,
- const TargetRegisterInfo *TRI,
- const MachineInstr *DefMI,
- const MachineInstr *UseMI,
- unsigned Reg, bool FindMin) const;
-
- /// getOutputLatency - Compute and return the output dependency latency of a
- /// a given pair of defs which both target the same register. This is usually
- /// one.
- virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *DepMI) const {
- return 1;
- }
-
/// getInstrLatency - Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
@@ -831,6 +815,9 @@ public:
unsigned defaultDefLatency(const MCSchedModel *SchedModel,
const MachineInstr *DefMI) const;
+ int computeDefOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, bool FindMin) const;
+
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
virtual bool isHighLatencyDef(int opc) const { return false; }
diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h
index c44b9230c0d8..ce213496935d 100644
--- a/include/llvm/Target/TargetIntrinsicInfo.h
+++ b/include/llvm/Target/TargetIntrinsicInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -27,8 +28,8 @@ class Type;
/// TargetIntrinsicInfo - Interface to description of machine instruction set
///
class TargetIntrinsicInfo {
- TargetIntrinsicInfo(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
+ TargetIntrinsicInfo(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
public:
TargetIntrinsicInfo();
virtual ~TargetIntrinsicInfo();
diff --git a/include/llvm/Target/TargetLibraryInfo.h b/include/llvm/Target/TargetLibraryInfo.h
index ea2874f440f7..a2c97d782e29 100644
--- a/include/llvm/Target/TargetLibraryInfo.h
+++ b/include/llvm/Target/TargetLibraryInfo.h
@@ -18,6 +18,26 @@ namespace llvm {
namespace LibFunc {
enum Func {
+ /// void operator delete[](void*);
+ ZdaPv,
+ /// void operator delete(void*);
+ ZdlPv,
+ /// void *operator new[](unsigned int);
+ Znaj,
+ /// void *operator new[](unsigned int, nothrow);
+ ZnajRKSt9nothrow_t,
+ /// void *operator new[](unsigned long);
+ Znam,
+ /// void *operator new[](unsigned long, nothrow);
+ ZnamRKSt9nothrow_t,
+ /// void *operator new(unsigned int);
+ Znwj,
+ /// void *operator new(unsigned int, nothrow);
+ ZnwjRKSt9nothrow_t,
+ /// void *operator new(unsigned long);
+ Znwm,
+ /// void *operator new(unsigned long, nothrow);
+ ZnwmRKSt9nothrow_t,
/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
cxa_atexit,
/// void __cxa_guard_abort(guard_t *guard);
@@ -33,12 +53,24 @@ namespace llvm {
acos,
/// float acosf(float x);
acosf,
+ /// double acosh(double x);
+ acosh,
+ /// float acoshf(float x);
+ acoshf,
+ /// long double acoshl(long double x);
+ acoshl,
/// long double acosl(long double x);
acosl,
/// double asin(double x);
asin,
/// float asinf(float x);
asinf,
+ /// double asinh(double x);
+ asinh,
+ /// float asinhf(float x);
+ asinhf,
+ /// long double asinhl(long double x);
+ asinhl,
/// long double asinl(long double x);
asinl,
/// double atan(double x);
@@ -51,8 +83,22 @@ namespace llvm {
atan2l,
/// float atanf(float x);
atanf,
+ /// double atanh(double x);
+ atanh,
+ /// float atanhf(float x);
+ atanhf,
+ /// long double atanhl(long double x);
+ atanhl,
/// long double atanl(long double x);
atanl,
+ /// void *calloc(size_t count, size_t size);
+ calloc,
+ /// double cbrt(double x);
+ cbrt,
+ /// float cbrtf(float x);
+ cbrtf,
+ /// long double cbrtl(long double x);
+ cbrtl,
/// double ceil(double x);
ceil,
/// float ceilf(float x);
@@ -79,6 +125,12 @@ namespace llvm {
cosl,
/// double exp(double x);
exp,
+ /// double exp10(double x);
+ exp10,
+ /// float exp10f(float x);
+ exp10f,
+ /// long double exp10l(long double x);
+ exp10l,
/// double exp2(double x);
exp2,
/// float exp2f(float x);
@@ -119,6 +171,8 @@ namespace llvm {
fputc,
/// int fputs(const char *s, FILE *stream);
fputs,
+ /// void free(void *ptr);
+ free,
/// size_t fwrite(const void *ptr, size_t size, size_t nitems,
/// FILE *stream);
fwrite,
@@ -144,10 +198,18 @@ namespace llvm {
log2f,
/// long double log2l(long double x);
log2l,
+ /// double logb(double x);
+ logb,
+ /// float logbf(float x);
+ logbf,
+ /// long double logbl(long double x);
+ logbl,
/// float logf(float x);
logf,
/// long double logl(long double x);
logl,
+ /// void *malloc(size_t size);
+ malloc,
/// void *memchr(const void *s, int c, size_t n);
memchr,
/// int memcmp(const void *s1, const void *s2, size_t n);
@@ -166,6 +228,8 @@ namespace llvm {
nearbyintf,
/// long double nearbyintl(long double x);
nearbyintl,
+ /// int posix_memalign(void **memptr, size_t alignment, size_t size);
+ posix_memalign,
/// double pow(double x, double y);
pow,
/// float powf(float x, float y);
@@ -176,6 +240,10 @@ namespace llvm {
putchar,
/// int puts(const char *s);
puts,
+ /// void *realloc(void *ptr, size_t size);
+ realloc,
+ /// void *reallocf(void *ptr, size_t size);
+ reallocf,
/// double rint(double x);
rint,
/// float rintf(float x);
@@ -208,12 +276,20 @@ namespace llvm {
sqrtf,
/// long double sqrtl(long double x);
sqrtl,
+ /// char *stpcpy(char *s1, const char *s2);
+ stpcpy,
/// char *strcat(char *s1, const char *s2);
strcat,
/// char *strchr(const char *s, int c);
strchr,
+ /// int strcmp(const char *s1, const char *s2);
+ strcmp,
/// char *strcpy(char *s1, const char *s2);
strcpy,
+ /// size_t strcspn(const char *s1, const char *s2);
+ strcspn,
+ /// char *strdup(const char *s1);
+ strdup,
/// size_t strlen(const char *s);
strlen,
/// char *strncat(char *s1, const char *s2, size_t n);
@@ -222,8 +298,33 @@ namespace llvm {
strncmp,
/// char *strncpy(char *s1, const char *s2, size_t n);
strncpy,
+ /// char *strndup(const char *s1, size_t n);
+ strndup,
/// size_t strnlen(const char *s, size_t maxlen);
strnlen,
+ /// char *strpbrk(const char *s1, const char *s2);
+ strpbrk,
+ /// char *strrchr(const char *s, int c);
+ strrchr,
+ /// size_t strspn(const char *s1, const char *s2);
+ strspn,
+ /// char *strstr(const char *s1, const char *s2);
+ strstr,
+ /// double strtod(const char *nptr, char **endptr);
+ strtod,
+ /// float strtof(const char *nptr, char **endptr);
+ strtof,
+ /// long int strtol(const char *nptr, char **endptr, int base);
+ strtol,
+ /// long double strtold(const char *nptr, char **endptr);
+ strtold,
+ /// long long int strtoll(const char *nptr, char **endptr, int base);
+ strtoll,
+ /// unsigned long int strtoul(const char *nptr, char **endptr, int base);
+ strtoul,
+ /// unsigned long long int strtoull(const char *nptr, char **endptr,
+ /// int base);
+ strtoull,
/// double tan(double x);
tan,
/// float tanf(float x);
@@ -242,6 +343,8 @@ namespace llvm {
truncf,
/// long double truncl(long double x);
truncl,
+ /// void *valloc(size_t size);
+ valloc,
NumLibFuncs
};
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index acf0419510e9..580a30fcd2d8 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -22,9 +22,11 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
+#include "llvm/AddressingMode.h"
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -49,7 +51,7 @@ namespace llvm {
class MCContext;
class MCExpr;
template<typename T> class SmallVectorImpl;
- class TargetData;
+ class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
@@ -76,8 +78,8 @@ namespace llvm {
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
- TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT
- void operator=(const TargetLowering&); // DO NOT IMPLEMENT
+ TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
public:
/// LegalizeAction - This enum indicates whether operations are valid for a
/// target, and if not, what action should be used to make them valid.
@@ -101,12 +103,24 @@ public:
TypeWidenVector // This vector should be widened into a larger vector.
};
+ /// LegalizeKind holds the legalization kind that needs to happen to EVT
+ /// in order to type-legalize it.
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
+
enum BooleanContent { // How the target represents true/false values.
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
};
+ enum SelectSupportKind {
+ ScalarValSelect, // The target supports scalar selects (ex: cmov).
+ ScalarCondVectorVal, // The target supports selects with a scalar condition
+ // and vector values (ex: cmov).
+ VectorMaskSelect // The target supports vector selects with a vector
+ // mask (ex: x86 blends).
+ };
+
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -128,22 +142,37 @@ public:
virtual ~TargetLowering();
const TargetMachine &getTargetMachine() const { return TM; }
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
- MVT getPointerTy() const { return PointerTy; }
+ // Return the pointer type for the given address space, defaulting to
+ // the pointer type from the data layout.
+ // FIXME: The default needs to be removed once all the code is updated.
+ virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
virtual MVT getShiftAmountTy(EVT LHSTy) const;
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
+ virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
+ /// isSlowDivBypassed - Returns true if target has indicated at least one
+ /// type should be bypassed.
+ bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+ /// getBypassSlowDivWidths - Returns the map of slow bit widths for division
+ /// or remainder, with the corresponding fast widths.
+ const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+ return BypassSlowDivWidths;
+ }
+
/// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
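As a sketch of how the slow-division bypass widths added above are meant to be used (a standalone mock using std::map rather than LLVM's DenseMap; the 32-to-8 pair is just the example from the comments):

    #include <map>

    // A target registers e.g. addBypassSlowDiv(32, 8): bypass 32-bit div/rem
    // with 8-bit unsigned div/rem when both operands fit in 8 bits.
    static std::map<unsigned, unsigned> BypassWidths;

    void addBypassSlowDiv(unsigned SlowBits, unsigned FastBits) {
      BypassWidths[SlowBits] = FastBits;
    }

    bool isSlowDivBypassed() { return !BypassWidths.empty(); }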
@@ -382,6 +411,13 @@ public:
getOperationAction(Op, VT) == Custom);
}
+ /// isOperationExpand - Return true if the specified operation is illegal on
+ /// this target or unlikely to be made legal with custom lowering. This is
+ /// used to help guide high-level lowering decisions.
+ bool isOperationExpand(unsigned Op, EVT VT) const {
+ return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+ }
+
/// isOperationLegal - Return true if the specified operation is legal on this
/// target.
bool isOperationLegal(unsigned Op, EVT VT) const {
@@ -475,8 +511,12 @@ public:
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
"Table isn't big enough!");
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit slot within a 64-bit
+ /// value; the remaining upper bits index into the second dimension of the
+ /// array to select which 64-bit value to use.
LegalizeAction Action = (LegalizeAction)
- ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
+ ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5]
+ >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
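To make the two-level indexing above concrete: each 64-bit word holds 32 two-bit actions, so the lower five bits of SimpleTy pick the slot within a word and the remaining bits pick the word. A minimal standalone sketch of the lookup (the helper name is hypothetical):

    #include <cstdint>

    // Extract the 2-bit action for value type 'SimpleTy' from a row of
    // 64-bit words, mirroring the shift/mask arithmetic above.
    unsigned getPackedAction(const uint64_t *Row, unsigned SimpleTy) {
      return unsigned((Row[SimpleTy >> 5] >> (2 * (SimpleTy & 0x1F))) & 3);
    }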
@@ -533,6 +573,7 @@ public:
}
return EVT::getEVT(Ty, AllowUnknown);
}
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -686,6 +727,12 @@ public:
return SupportJumpTables;
}
+ /// getMinimumJumpTableEntries - Return the threshold on the number of
+ /// blocks above which jump tables are used rather than an if sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
+ }
+
/// getStackPointerRegisterToSaveRestore - If a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1006,6 +1053,12 @@ protected:
SupportJumpTables = Val;
}
+ /// setMinimumJumpTableEntries - Set the threshold on the number of blocks
+ /// above which jump tables are generated rather than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
+
/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1045,6 +1098,11 @@ protected:
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
+ /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
+ void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+ BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+ }
+
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1127,8 +1185,13 @@ protected:
assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
- CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
- CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
+ /// The lower 5 bits of SimpleTy select the Nth 2-bit slot within a 64-bit
+ /// value; the remaining upper bits index into the second dimension of the
+ /// array to select which 64-bit value to use.
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
}
/// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
@@ -1201,7 +1264,7 @@ protected:
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
- // the SelectionDAGLowering code knows how to lower these.
+ // the SelectionDAGBuilder code knows how to lower these.
//
/// LowerFormalArguments - This hook must be implemented to lower the
@@ -1271,9 +1334,9 @@ public:
FunctionType *FTy, bool isTailCall, SDValue callee,
ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
ImmutableCallSite &cs)
- : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
- RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
- IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
+ : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
+ RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
+ IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
DoesNotReturn(cs.doesNotReturn()),
IsReturnValueUsed(!cs.getInstruction()->use_empty()),
IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
@@ -1314,7 +1377,7 @@ public:
}
/// HandleByVal - Target-specific cleanup for formal ByVal parameters.
- virtual void HandleByVal(CCState *, unsigned &) const {}
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
/// CanLowerReturn - This hook should be implemented to check whether the
/// return values described by the Outs array can fit into the return
@@ -1584,22 +1647,6 @@ public:
// Addressing mode description hooks (used by LSR etc).
//
- /// AddrMode - This represents an addressing mode of:
- /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- /// If BaseGV is null, there is no BaseGV.
- /// If BaseOffs is zero, there is no base offset.
- /// If HasBaseReg is false, there is no base register.
- /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
- /// no scale.
- ///
- struct AddrMode {
- GlobalValue *BaseGV;
- int64_t BaseOffs;
- bool HasBaseReg;
- int64_t Scale;
- AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
- };
-
/// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
/// same BB as Load/Store instructions reading the address. This allows as
/// much computation as possible to be done in the address mode for that
@@ -1741,10 +1788,11 @@ public:
private:
const TargetMachine &TM;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLoweringObjectFile &TLOF;
- /// PointerTy - The type to use for pointers, usually i32 or i64.
+ /// PointerTy - The type to use for pointers for the default address space,
+ /// usually i32 or i64.
///
MVT PointerTy;
@@ -1762,6 +1810,12 @@ private:
/// set to true unconditionally.
bool IntDivIsCheap;
+ /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
+ /// remainder instructions. For example, BypassSlowDivWidths[32] = 8 tells the
+ /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
+ /// integer div/rem when the operands are positive and less than 256.
+ DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1784,6 +1838,9 @@ private:
/// If it's not true, then each jumptable must be lowered into if-then-else's.
bool SupportJumpTables;
+ /// MinimumJumpTableEntries - Block-count threshold for using jump tables.
+ int MinimumJumpTableEntries;
+
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -1901,12 +1958,14 @@ private:
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
- uint64_t CondCodeActions[ISD::SETCC_INVALID];
+ /// Because each CC action takes up 2 bits, we need to have the array size
+ /// be large enough to fit all of the value types. This can be done by
+ /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
+ uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
ValueTypeActionImpl ValueTypeActions;
- typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
-
+public:
LegalizeKind
getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
@@ -1921,6 +1980,9 @@ private:
ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
+ if (LA == TypeSplitVector)
+ NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
+ VT.getVectorNumElements() / 2);
return LegalizeKind(LA, NVT);
}
@@ -2023,6 +2085,7 @@ private:
return LegalizeKind(TypeSplitVector, NVT);
}
+private:
std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index d631f58aab74..13a6fe37d7a9 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -33,10 +33,11 @@ namespace llvm {
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
-
- TargetLoweringObjectFile(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
- void operator=(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
-
+
+ TargetLoweringObjectFile(
+ const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+
public:
MCContext &getContext() const { return *Ctx; }
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index e4bf32bd86c8..50066473b552 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -17,6 +17,8 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -31,8 +33,7 @@ class MCCodeGenInfo;
class MCContext;
class PassManagerBase;
class Target;
-class TargetData;
-class TargetELFWriterInfo;
+class DataLayout;
class TargetFrameLowering;
class TargetInstrInfo;
class TargetIntrinsicInfo;
@@ -52,8 +53,8 @@ class raw_ostream;
/// through this interface.
///
class TargetMachine {
- TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT
- void operator=(const TargetMachine &); // DO NOT IMPLEMENT
+ TargetMachine(const TargetMachine &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetMachine &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef TargetTriple,
StringRef CPU, StringRef FS, const TargetOptions &Options);
@@ -106,7 +107,11 @@ public:
virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const TargetData *getTargetData() const { return 0; }
+ virtual const DataLayout *getDataLayout() const { return 0; }
+ virtual const ScalarTargetTransformInfo*
+ getScalarTargetTransformInfo() const { return 0; }
+ virtual const VectorTargetTransformInfo*
+ getVectorTargetTransformInfo() const { return 0; }
/// getMCAsmInfo - Return target specific asm information.
///
@@ -142,11 +147,6 @@ public:
return 0;
}
- /// getELFWriterInfo - If this target supports an ELF writer, return
- /// information for it, otherwise return null.
- ///
- virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
-
/// hasMCRelaxAll - Check whether all machine code instructions should be
/// relaxed.
bool hasMCRelaxAll() const { return MCRelaxAll; }
diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h
index f0b181e345b7..516e0706b897 100644
--- a/include/llvm/Target/TargetOpcodes.h
+++ b/include/llvm/Target/TargetOpcodes.h
@@ -87,7 +87,11 @@ namespace TargetOpcode {
/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
- BUNDLE
+ BUNDLE = 14,
+
+ /// Lifetime markers.
+ LIFETIME_START = 15,
+ LIFETIME_END = 16
};
} // end namespace TargetOpcode
} // end namespace llvm
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index d1a07d1480b4..68ca5678369a 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -155,6 +155,10 @@ namespace llvm {
/// automatically realigned, if needed.
unsigned RealignStack : 1;
+ /// SSPBufferSize - The minimum size of buffers that will receive stack
+ /// smashing protection when -fstack-protector is used.
+ unsigned SSPBufferSize;
+
/// EnableFastISel - This flag enables fast-path instruction selection
/// which trades away generated code quality in favor of reducing
/// compile time.
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index df4d900e4c8e..afa2ee27443a 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -221,13 +221,17 @@ public:
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
+ // Pointer to array of lane masks, one per sub-reg index.
+ const unsigned *SubRegIndexLaneMasks;
+
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RegClassBegin,
regclass_iterator RegClassEnd,
- const char *const *subregindexnames);
+ const char *const *SRINames,
+ const unsigned *SRILaneMasks);
virtual ~TargetRegisterInfo();
public:
@@ -327,10 +331,36 @@ public:
/// getSubRegIndexName - Return the human-readable symbolic target-specific
/// name for the specified SubRegIndex.
const char *getSubRegIndexName(unsigned SubIdx) const {
- assert(SubIdx && "This is not a subregister index");
+ assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+ "This is not a subregister index");
return SubRegIndexNames[SubIdx-1];
}
+ /// getSubRegIndexLaneMask - Return a bitmask representing the parts of a
+ /// register that are covered by SubIdx.
+ ///
+ /// Lane masks for sub-register indices are similar to register units for
+ /// physical registers. The individual bits in a lane mask can't be assigned
+ /// any specific meaning. They can be used to check if two sub-register
+ /// indices overlap.
+ ///
+ /// If the target has a register such that:
+ ///
+ /// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+ ///
+ /// then:
+ ///
+ /// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0
+ ///
+ /// The converse is not necessarily true. If two lane masks have a common
+ /// bit, the corresponding sub-registers may not overlap, but it can be
+ /// assumed that they usually will.
+ unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
+ // SubIdx == 0 is allowed, it has the lane mask ~0u.
+ assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+ return SubRegIndexLaneMasks[SubIdx];
+ }
+
/// regsOverlap - Returns true if the two registers are equal or alias each
/// other. The registers may be virtual register.
bool regsOverlap(unsigned regA, unsigned regB) const {
@@ -416,18 +446,6 @@ public:
return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
}
- /// canCombineSubRegIndices - Given a register class and a list of
- /// subregister indices, return true if it's possible to combine the
- /// subregister indices into one that corresponds to a larger
- /// subregister. Return the new subregister index by reference. Note the
- /// new index may be zero if the given subregisters can be combined to
- /// form the whole register.
- virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
- return 0;
- }
-
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
@@ -458,6 +476,8 @@ public:
/// composeSubRegIndices - Return the subregister index you get from composing
/// two subregister indices.
///
+ /// The special null sub-register index composes as the identity.
+ ///
/// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
/// returns c. Note that composeSubRegIndices does not tell you about illegal
/// compositions. If R does not have a subreg a, or R:a does not have a subreg
@@ -467,11 +487,19 @@ public:
/// ssub_0:S0 - ssub_3:S3 subregs.
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
///
- virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
- // This default implementation is correct for most targets.
- return b;
+ unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return composeSubRegIndicesImpl(a, b);
}
+protected:
+ /// Overridden by TableGen in targets that have sub-registers.
+ virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+ llvm_unreachable("Target has no sub-registers");
+ }
+
+public:
/// getCommonSuperRegClass - Find a common super-register class if it exists.
///
/// Find a register class, SuperRC and two sub-register indices, PreA and
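
To make the intended use of the two new queries concrete, here is a sketch of client code; TRI, IdxA and IdxB stand for any TargetRegisterInfo instance and any pair of sub-register indices:

  #include "llvm/Target/TargetRegisterInfo.h"

  // Conservative overlap test: a zero intersection proves the sub-registers
  // are disjoint, while a non-zero intersection only says they usually overlap.
  static bool lanesMayOverlap(const llvm::TargetRegisterInfo *TRI,
                              unsigned IdxA, unsigned IdxB) {
    return (TRI->getSubRegIndexLaneMask(IdxA) &
            TRI->getSubRegIndexLaneMask(IdxB)) != 0;
  }

  // Composition now treats the null index as the identity, so callers no
  // longer need to special-case a zero sub-register index:
  //   unsigned Idx = TRI->composeSubRegIndices(OuterIdx, InnerIdx);
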
diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td
index 4dc488dbaece..0da82fdd8971 100644
--- a/include/llvm/Target/TargetSchedule.td
+++ b/include/llvm/Target/TargetSchedule.td
@@ -10,25 +10,77 @@
// This file defines the target-independent scheduling interfaces which should
// be implemented by each target which is using TableGen based scheduling.
//
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1. Basic properties for coarse grained instruction cost model.
+// 2. Scheduler Read/Write resources for simple per-opcode cost model.
+// 3. Instruction itineraries for detailed reservation tables.
+//
+// (1) Basic properties are defined by the SchedMachineModel
+// class. Target hooks allow subtargets to associate opcodes with
+// those properties.
+//
+// (2) A per-operand machine model can be implemented in any
+// combination of the following ways:
+//
+// A. Associate per-operand SchedReadWrite types with Instructions by
+// modifying the Instruction definition to inherit from Sched. For
+// each subtarget, define WriteRes and ReadAdvance to associate
+// processor resources and latency with each SchedReadWrite type.
+//
+// B. In each instruction definition, name an ItineraryClass. For each
+// subtarget, define ItinRW entries to map ItineraryClass to
+// per-operand SchedReadWrite types. Unlike method A, these types may
+// be subtarget specific and can be directly associated with resources
+// by defining SchedWriteRes and SchedReadAdvance.
+//
+// C. In the subtarget, map SchedReadWrite types to specific
+// opcodes. This overrides any SchedReadWrite types or
+// ItineraryClasses defined by the Instruction. As in method B, the
+// subtarget can directly associate resources with SchedReadWrite
+// types by defining SchedWriteRes and SchedReadAdvance.
+//
+// D. In either the target or subtarget, define SchedWriteVariant or
+// SchedReadVariant to map one SchedReadWrite type onto another
+// sequence of SchedReadWrite types. This allows dynamic selection of
+// an instruction's machine model via custom C++ code. It also allows
+// a machine-independent SchedReadWrite type to map to a sequence of
+// machine-dependent types.
+//
+// (3) A per-pipeline-stage machine model can be implemented by providing
+// Itineraries in addition to mapping instructions to ItineraryClasses.
//===----------------------------------------------------------------------===//
+// Include legacy support for instruction itineraries.
include "llvm/Target/TargetItinerary.td"
-// The SchedMachineModel is defined by subtargets for three categories of data:
-// 1) Basic properties for coarse grained instruction cost model.
-// 2) Scheduler Read/Write resources for simple per-opcode cost model.
-// 3) Instruction itineraties for detailed reservation tables.
+class Instruction; // Forward def
+
+// DAG operator that interprets the DAG args as Instruction defs.
+def instrs;
+
+// DAG operator that interprets each DAG arg as a regex pattern for
+// matching Instruction opcode names.
+// The regex must match the beginning of the opcode (as in Python re.match).
+// To avoid matching prefixes, append '$' to the pattern.
+def instregex;
+
+// Define the SchedMachineModel and provide basic properties for
+// coarse grained instruction cost model. Default values for the
+// properties are defined in MCSchedModel. A value of "-1" in the
+// target description's SchedMachineModel indicates that the property
+// is not overridden by the target.
//
-// Default values for basic properties are defined in MCSchedModel. "-1"
-// indicates that the property is not overriden by the target description.
+// Target hooks allow subtargets to associate LoadLatency and
+// HighLatency with groups of opcodes.
class SchedMachineModel {
- int IssueWidth = -1; // Max instructions that may be scheduled per cycle.
+ int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
int MinLatency = -1; // Determines which instrucions are allowed in a group.
// (-1) inorder (0) ooo, (1): inorder +var latencies.
int LoadLatency = -1; // Cycles for loads to access the cache.
int HighLatency = -1; // Approximation of cycles for "high latency" ops.
int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
+ // Per-cycle resource tables.
ProcessorItineraries Itineraries = NoItineraries;
bit NoModel = 0; // Special tag to indicate missing machine model.
@@ -38,4 +90,276 @@ def NoSchedModel : SchedMachineModel {
let NoModel = 1;
}
-// TODO: Define classes for processor and scheduler resources.
+// Define a kind of processor resource that may be common across
+// similar subtargets.
+class ProcResourceKind;
+
+// Define a number of interchangeable processor resources. NumUnits
+// determines the throughput of instructions that require the resource.
+//
+// An optional Super resource may be given to model these resources as
+// a subset of the more general super resources. Using one of these
+// resources implies using one of the super resources.
+//
+// ProcResourceUnits normally model a few buffered resources within an
+// out-of-order engine that the compiler attempts to conserve.
+// Buffered resources may be held for multiple clock cycles, but the
+// scheduler does not pin them to a particular clock cycle relative to
+// instruction dispatch. Setting Buffered=0 changes this to an
+// in-order resource. In this case, the scheduler counts down from the
+// cycle that the instruction issues in-order, forcing an interlock
+// with subsequent instructions that require the same resource until
+// the number of ResourceCycles specified in WriteRes expires.
+//
+// SchedModel ties these units to a processor for any stand-alone defs
+// of this class. Instances of subclass ProcResource will be automatically
+// attached to a processor, so SchedModel is not needed.
+class ProcResourceUnits<ProcResourceKind kind, int num> {
+ ProcResourceKind Kind = kind;
+ int NumUnits = num;
+ ProcResourceKind Super = ?;
+ bit Buffered = 1;
+ SchedMachineModel SchedModel = ?;
+}
+
+// EponymousProcResourceKind helps implement ProcResourceUnits by
+// allowing a ProcResourceUnits definition to reference itself. It
+// should not be referenced anywhere else.
+def EponymousProcResourceKind : ProcResourceKind;
+
+// Subtargets typically define processor resource kind and number of
+// units in one place.
+class ProcResource<int num> : ProcResourceKind,
+ ProcResourceUnits<EponymousProcResourceKind, num>;
+
+// A target architecture may define SchedReadWrite types and associate
+// them with instruction operands.
+class SchedReadWrite;
+
+// List the per-operand types that map to the machine model of an
+// instruction. One SchedWrite type must be listed for each explicit
+// def operand in order. Additional SchedWrite types may optionally be
+// listed for implicit def operands. SchedRead types may optionally
+// be listed for use operands in order. The order of defs relative to
+// uses is insignificant. This way, the same SchedReadWrite list may
+// be used for multiple forms of an operation. For example, a
+// two-address instruction could have two tied operands or a single
+// operand that both reads and writes a reg. In both cases we have a
+// single SchedWrite and single SchedRead in any order.
+class Sched<list<SchedReadWrite> schedrw> {
+ list<SchedReadWrite> SchedRW = schedrw;
+}
+
+// Define a scheduler resource associated with a def operand.
+class SchedWrite : SchedReadWrite;
+def NoWrite : SchedWrite;
+
+// Define a scheduler resource associated with a use operand.
+class SchedRead : SchedReadWrite;
+
+// Define a SchedWrite that is modeled as a sequence of other
+// SchedWrites with additive latency. This allows a single operand to
+// be mapped to the resources composed from a set of previously defined
+// SchedWrites.
+//
+// If the final write in this sequence is a SchedWriteVariant marked
+// Variadic, then the list of prior writes are distributed across all
+// operands after resolving the predicate for the final write.
+//
+// SchedModel silences warnings but is ignored.
+class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
+ list<SchedWrite> Writes = writes;
+ int Repeat = rep;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define values common to WriteRes and SchedWriteRes.
+//
+// SchedModel ties these resources to a processor.
+class ProcWriteResources<list<ProcResourceKind> resources> {
+ list<ProcResourceKind> ProcResources = resources;
+ list<int> ResourceCycles = [];
+ int Latency = 1;
+ int NumMicroOps = 1;
+ bit BeginGroup = 0;
+ bit EndGroup = 0;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define the resources and latency of a SchedWrite. This will be used
+// directly by targets that have no itinerary classes. In this case,
+// SchedWrite is defined by the target, while WriteResources is
+// defined by the subtarget, and maps the SchedWrite to processor
+// resources.
+//
+// If a target already has itinerary classes, SchedWriteResources can
+// be used instead to define subtarget specific SchedWrites and map
+// them to processor resources in one place. Then ItinRW can map
+// itinerary classes to the subtarget's SchedWrites.
+//
+// ProcResources indicates the set of resources consumed by the write.
+// Optionally, ResourceCycles indicates the number of cycles the
+// resource is consumed. Each ResourceCycles item is paired with the
+// ProcResource item at the same position in its list. Since
+// ResourceCycles are rarely specialized, the list may be
+// incomplete. By default, resources are consumed for a single cycle,
+// regardless of latency, which models a fully pipelined processing
+// unit. A value of 0 for ResourceCycles means that the resource must
+// be available but is not consumed, which is only relevant for
+// unbuffered resources.
+//
+// By default, each SchedWrite takes one micro-op, which is counted
+// against the processor's IssueWidth limit. If an instruction can
+// write multiple registers with a single micro-op, the subtarget
+// should define one of the writes to be zero micro-ops. If a
+// subtarget requires multiple micro-ops to write a single result, it
+// should either override the write's NumMicroOps to be greater than 1
+// or require additional writes. Extra writes can be required either
+// by defining a WriteSequence, or simply listing extra writes in the
+// instruction's list of writes beyond the number of "def"
+// operands. The scheduler assumes that all micro-ops must be
+// dispatched in the same cycle. These micro-ops may be required to
+// begin or end the current dispatch group.
+class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
+ : ProcWriteResources<resources> {
+ SchedWrite WriteType = write;
+}
+
+// Directly name a set of WriteResources defining a new SchedWrite
+// type at the same time. This class is unaware of its SchedModel so
+// must be referenced by InstRW or ItinRW.
+class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
+ ProcWriteResources<resources>;
+
+// Define values common to ReadAdvance and SchedReadAdvance.
+//
+// SchedModel ties these resources to a processor.
+class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
+ int Cycles = cycles;
+ list<SchedWrite> ValidWrites = writes;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A processor may define a ReadAdvance associated with a SchedRead
+// to reduce latency of a prior write by N cycles. A negative advance
+// effectively increases latency, which may be used for cross-domain
+// stalls.
+//
+// A ReadAdvance may be associated with a list of SchedWrites
+// to implement pipeline bypass. The Writes list may be empty to
+// indicate operands that are always read this number of Cycles later
+// than a normal register read, allowing the read's parent instruction
+// to issue earlier relative to the writer.
+class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
+ : ProcReadAdvance<cycles, writes> {
+ SchedRead ReadType = read;
+}
+
+// Directly associate a new SchedRead type with a delay and optional
+// pipeline bypass. For use with InstRW or ItinRW.
+class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
+ ProcReadAdvance<cycles, writes>;
+
+// Define SchedRead defaults. Reads seldom need special treatment.
+def ReadDefault : SchedRead;
+def NoReadAdvance : SchedReadAdvance<0>;
+
+// Define shared code that will be in the same scope as all
+// SchedPredicates. Available variables are:
+// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
+class PredicateProlog<code c> {
+ code Code = c;
+}
+
+// Define a predicate to determine which SchedVariant applies to a
+// particular MachineInstr. The code snippet is used as an
+// if-statement's expression. Available variables are MI, SchedModel,
+// and anything defined in a PredicateProlog.
+//
+// SchedModel silences warnings but is ignored.
+class SchedPredicate<code pred> {
+ SchedMachineModel SchedModel = ?;
+ code Predicate = pred;
+}
+def NoSchedPred : SchedPredicate<[{true}]>;
+
+// Associate a predicate with a list of SchedReadWrites. By default,
+// the selected SchedReadWrites are still associated with a single
+// operand and assumed to execute sequentially with additive
+// latency. However, if the parent SchedWriteVariant or
+// SchedReadVariant is marked "Variadic", then each Selected
+// SchedReadWrite is mapped in place to the instruction's variadic
+// operands. In this case, latency is not additive. If the current Variant
+// is already part of a Sequence, then that entire chain leading up to
+// the Variant is distributed over the variadic operands.
+class SchedVar<SchedPredicate pred, list<SchedReadWrite> selected> {
+ SchedPredicate Predicate = pred;
+ list<SchedReadWrite> Selected = selected;
+}
+
+// SchedModel silences warnings but is ignored.
+class SchedVariant<list<SchedVar> variants> {
+ list<SchedVar> Variants = variants;
+ bit Variadic = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A SchedWriteVariant is a single SchedWrite type that maps to a list
+// of SchedWrite types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "def" operands. The
+// SchedVariant's Expansion list is then interpreted as one write
+// per-operand instead of the usual sequential writes feeding a single
+// operand.
+class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
+ SchedVariant<variants> {
+}
+
+// A SchedReadVariant is a single SchedRead type that maps to a list
+// of SchedRead types under the conditions defined by its predicates.
+//
+// A Variadic read is expanded to cover multiple "readsReg" operands as
+// explained above.
+class SchedReadVariant<list<SchedVar> variants> : SchedRead,
+ SchedVariant<variants> {
+}
+
+// Map a set of opcodes to a list of SchedReadWrite types. This allows
+// the subtarget to easily override specific operations.
+//
+// SchedModel ties this opcode mapping to a processor.
+class InstRW<list<SchedReadWrite> rw, dag instrlist> {
+ list<SchedReadWrite> OperandReadWrites = rw;
+ dag Instrs = instrlist;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Map a set of itinerary classes to SchedReadWrite resources. This is
+// used to bootstrap a target (e.g. ARM) when itineraries already
+// exist and changing InstrInfo is undesirable.
+//
+// SchedModel ties this ItineraryClass mapping to a processor.
+class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
+ list<InstrItinClass> MatchedItinClasses = iic;
+ list<SchedReadWrite> OperandReadWrites = rw;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Alias a target-defined SchedReadWrite to a processor specific
+// SchedReadWrite. This allows a subtarget to easily map a
+// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
+// SchedReadVariant.
+//
+// SchedModel will usually be provided by a surrounding let statement
+// and ties this SchedAlias mapping to a processor.
+class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
+ SchedReadWrite MatchRW = match;
+ SchedReadWrite AliasRW = alias;
+ SchedMachineModel SchedModel = ?;
+}
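
A compact sketch of method A for a hypothetical subtarget may make the above concrete. All names here (MyModel, MyALU, MyLSU, WriteALU, WriteLoad, WriteShift, ReadALU) are illustrative, not defined by this patch; the snippet follows the surrounding file's TableGen syntax:

  // Target side: per-operand scheduling types, referenced by instructions
  // via Sched<[...]>, e.g. "def ADDrr : ..., Sched<[WriteALU, ReadALU, ReadALU]>;".
  def WriteALU   : SchedWrite;
  def WriteLoad  : SchedWrite;
  def WriteShift : SchedWrite;
  def ReadALU    : SchedRead;

  // Subtarget side: a machine model plus the resources behind each type.
  def MyModel : SchedMachineModel {
    let IssueWidth = 2;
    let LoadLatency = 3;
    let MispredictPenalty = 10;
  }

  let SchedModel = MyModel in {
    def MyALU : ProcResource<2>;    // two interchangeable ALU pipes
    def MyLSU : ProcResource<1>;

    def : WriteRes<WriteALU,  [MyALU]>;                     // default Latency = 1
    def : WriteRes<WriteLoad, [MyLSU]> { let Latency = 3; }
    def : ReadAdvance<ReadALU, 1, [WriteALU]>;              // ALU-to-ALU bypass
  }
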
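
Methods B, C and D follow the same pattern. Again, every name below (MyWriteCRC, the CRC32 regex, IIC_iLoad, MyImmPred, MyWriteShift) is hypothetical and only sketches how the classes above compose:

  let SchedModel = MyModel in {
    // Define a subtarget-local write and its resources in one step ...
    def MyWriteCRC : SchedWriteRes<[MyALU]> { let Latency = 2; }
    // ... and pin specific opcodes to it, overriding their Instruction
    // definitions (method C).
    def : InstRW<[MyWriteCRC], (instregex "CRC32")>;

    // Bridge an existing itinerary class to per-operand types (method B);
    // IIC_iLoad stands in for an InstrItinClass the target already defines.
    def : ItinRW<[WriteLoad], [IIC_iLoad]>;

    // Select between one and two ALU writes based on the MachineInstr (method D).
    def MyImmPred : SchedPredicate<[{MI->getOperand(2).isImm()}]>;
    def MyWriteShift : SchedWriteVariant<[
      SchedVar<MyImmPred,   [WriteALU]>,
      SchedVar<NoSchedPred, [WriteALU, WriteALU]>]>;
    def : SchedAlias<WriteShift, MyWriteShift>;
  }
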
diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td
index 3f81c06bc0b6..83bd7874df76 100644
--- a/include/llvm/Target/TargetSelectionDAG.td
+++ b/include/llvm/Target/TargetSelectionDAG.td
@@ -445,9 +445,9 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
diff --git a/include/llvm/Target/TargetSelectionDAGInfo.h b/include/llvm/Target/TargetSelectionDAGInfo.h
index c9ca7223b5f5..96793bc036e7 100644
--- a/include/llvm/Target/TargetSelectionDAGInfo.h
+++ b/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -20,7 +20,7 @@
namespace llvm {
-class TargetData;
+class DataLayout;
class TargetMachine;
//===----------------------------------------------------------------------===//
@@ -28,13 +28,13 @@ class TargetMachine;
/// SelectionDAG lowering and instruction selection process.
///
class TargetSelectionDAGInfo {
- TargetSelectionDAGInfo(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
+ TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
- const TargetData *TD;
+ const DataLayout *TD;
protected:
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
public:
explicit TargetSelectionDAGInfo(const TargetMachine &TM);
diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h
index fc23b2c6b58d..6db96d980b5e 100644
--- a/include/llvm/Target/TargetSubtargetInfo.h
+++ b/include/llvm/Target/TargetSubtargetInfo.h
@@ -19,9 +19,11 @@
namespace llvm {
+class MachineInstr;
class SDep;
class SUnit;
class TargetRegisterClass;
+class TargetSchedModel;
template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
@@ -31,8 +33,8 @@ template <typename T> class SmallVectorImpl;
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
- TargetSubtargetInfo(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
- void operator=(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
+ TargetSubtargetInfo(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses...
TargetSubtargetInfo();
public:
@@ -43,23 +45,26 @@ public:
virtual ~TargetSubtargetInfo();
- /// getSpecialAddressLatency - For targets where it is beneficial to
- /// backschedule instructions that compute addresses, return a value
- /// indicating the number of scheduling cycles of backscheduling that
- /// should be attempted.
- virtual unsigned getSpecialAddressLatency() const { return 0; }
+ /// Resolve a SchedClass at runtime, where SchedClass identifies an
+ /// MCSchedClassDesc with the isVariant property. This may return the ID of
+ /// another variant SchedClass, but repeated invocation must quickly terminate
+ /// in a nonvariant SchedClass.
+ virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
+ const TargetSchedModel* SchedModel) const {
+ return 0;
+ }
// enablePostRAScheduler - If the target can benefit from post-regalloc
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling. In
// CriticalPathRCs return any register classes that should only be broken
- // if on the critical path.
+ // if on the critical path.
virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
// adjustSchedDependency - Perform target specific adjustments to
// the latency of a schedule dependency.
- virtual void adjustSchedDependency(SUnit *def, SUnit *use,
+ virtual void adjustSchedDependency(SUnit *def, SUnit *use,
SDep& dep) const { }
};
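
A sketch of how a scheduling client might drive the new hook until it reaches a non-variant class; it assumes the MC layer's MCSchedModel::getSchedClassDesc() and MCSchedClassDesc::isVariant() accessors, and the function name is ours:

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/MC/MCSchedule.h"
  #include "llvm/Target/TargetSubtargetInfo.h"

  // Follow variant scheduling classes until a concrete one is reached; the
  // contract above guarantees this loop terminates quickly.
  static unsigned resolveFully(unsigned SchedClass, const llvm::MachineInstr *MI,
                               const llvm::TargetSubtargetInfo &STI,
                               const llvm::TargetSchedModel *SchedModel,
                               const llvm::MCSchedModel &MCModel) {
    while (MCModel.getSchedClassDesc(SchedClass)->isVariant())
      SchedClass = STI.resolveSchedClass(SchedClass, MI, SchedModel);
    return SchedClass;
  }
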
diff --git a/include/llvm/Target/TargetTransformImpl.h b/include/llvm/Target/TargetTransformImpl.h
new file mode 100644
index 000000000000..7ea2396076dc
--- /dev/null
+++ b/include/llvm/Target/TargetTransformImpl.h
@@ -0,0 +1,98 @@
+//=- llvm/Target/TargetTransformImpl.h - Target Loop Trans Info----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the target-specific implementations of the
+// TargetTransform interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+#define LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+
+class TargetLowering;
+
+/// ScalarTargetTransformInfo - This is a default implementation for the
+/// ScalarTargetTransformInfo interface. Different targets can implement
+/// this interface differently.
+class ScalarTargetTransformImpl : public ScalarTargetTransformInfo {
+private:
+ const TargetLowering *TLI;
+
+public:
+ /// Ctor
+ explicit ScalarTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {}
+
+ virtual bool isLegalAddImmediate(int64_t imm) const;
+
+ virtual bool isLegalICmpImmediate(int64_t imm) const;
+
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+
+ virtual bool isTypeLegal(Type *Ty) const;
+
+ virtual unsigned getJumpBufAlignment() const;
+
+ virtual unsigned getJumpBufSize() const;
+
+ virtual bool shouldBuildLookupTables() const;
+};
+
+class VectorTargetTransformImpl : public VectorTargetTransformInfo {
+protected:
+ const TargetLowering *TLI;
+
+ /// Estimate the cost of type-legalization and the legalized type.
+ std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
+
+ // Get the ISD node that corresponds to the Instruction class opcode.
+ int InstructionOpcodeToISD(unsigned Opcode) const;
+
+public:
+ explicit VectorTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {}
+
+ virtual ~VectorTargetTransformImpl() {}
+
+ virtual unsigned getInstrCost(unsigned Opcode, Type *Ty1, Type *Ty2) const;
+
+ virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
+
+ virtual unsigned getBroadcastCost(Type *Tp) const;
+
+ virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
+ Type *Src) const;
+
+ virtual unsigned getCFInstrCost(unsigned Opcode) const;
+
+ virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) const;
+
+ virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const;
+
+ virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) const;
+
+ virtual unsigned getNumberOfParts(Type *Tp) const;
+};
+
+} // end llvm namespace
+
+#endif
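
As a usage note, a backend would typically subclass these defaults, refine only the hooks it cares about, and return the object from the TargetMachine accessors added earlier in this patch. A hypothetical sketch (MyTargetVectorTTI and the constant cost are illustrative, not part of the patch):

  #include "llvm/Target/TargetTransformImpl.h"

  class MyTargetVectorTTI : public llvm::VectorTargetTransformImpl {
  public:
    explicit MyTargetVectorTTI(const llvm::TargetLowering *TL)
        : VectorTargetTransformImpl(TL) {}

    // Pretend broadcasts are always a single shuffle on this target; every
    // other query falls through to the shared cost logic in the base class.
    virtual unsigned getBroadcastCost(llvm::Type *Tp) const {
      return 1;
    }
  };
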