Diffstat (limited to 'contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp')
 contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp | 72
 1 file changed, 42 insertions(+), 30 deletions(-)
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index be7ef6420193..51e4d036fe91 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -69,7 +69,7 @@ getTargetNodeName(unsigned Opcode) const
}
XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
- : TargetLowering(TM, new XCoreTargetObjectFile()), TM(TM),
+ : TargetLowering(TM), TM(TM),
Subtarget(TM.getSubtarget<XCoreSubtarget>()) {
// Set up the register classes.
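
Upstream note: the TargetLowering constructor no longer creates the
per-target TargetLoweringObjectFile; ownership moved into the
TargetMachine, which the lowering code reaches through
getObjFileLowering(). A minimal sketch of the receiving side, assuming
3.6-era interfaces (the TLOF member name is an assumption modeled on
other in-tree targets):

    // XCoreTargetMachine.h (sketch)
    class XCoreTargetMachine : public LLVMTargetMachine {
      std::unique_ptr<TargetLoweringObjectFile> TLOF; // assumed member
    public:
      TargetLoweringObjectFile *getObjFileLowering() const override {
        return TLOF.get(); // hands lowering its object-file info
      }
    };
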
@@ -127,12 +127,14 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
// Loads
- setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
- setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
+ }
// Custom expand misaligned loads / stores.
setOperationAction(ISD::LOAD, MVT::i32, Custom);
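
The extra MVT argument in these calls reflects a widened
setLoadExtAction signature: the legalize action is now keyed on the
(result type, memory type) pair instead of the memory type alone, and
MVT::integer_valuetypes() iterates every integer MVT so a single loop
covers all result types. A sketch of the hook as declared in that era:

    // TargetLoweringBase (sketch, 3.6-era declaration)
    void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                          LegalizeAction Action);
    // ValVT is the value produced by the extending load; MemVT is the
    // narrower type actually read from memory.
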
@@ -426,7 +428,9 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
"Unexpected extension type");
assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
- if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
+ if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
+ LD->getAddressSpace(),
+ LD->getAlignment()))
return SDValue();
unsigned ABIAlignment = getDataLayout()->
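
allowsUnalignedMemoryAccesses(VT) became allowsMisalignedMemoryAccesses
and gained address-space and alignment operands, so the caller now
describes the exact access being queried rather than just its type. The
hook's shape at the time, sketched from the same interface change:

    // TargetLoweringBase (sketch, 3.6-era signature)
    virtual bool allowsMisalignedMemoryAccesses(EVT VT,
                                                unsigned AddrSpace = 0,
                                                unsigned Align = 1,
                                                bool *Fast = nullptr) const;
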
@@ -461,14 +465,15 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (LD->getAlignment() == 2) {
SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
BasePtr, LD->getPointerInfo(), MVT::i16,
- LD->isVolatile(), LD->isNonTemporal(), 2);
+ LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(), 2);
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
HighAddr,
LD->getPointerInfo().getWithOffset(2),
MVT::i16, LD->isVolatile(),
- LD->isNonTemporal(), 2);
+ LD->isNonTemporal(), LD->isInvariant(), 2);
SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
DAG.getConstant(16, MVT::i32));
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
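
getExtLoad now threads the load's invariant flag through, matching
plain getLoad; before this change extending loads simply dropped it.
The call shape with the new parameter order (volatile, non-temporal,
invariant, alignment), mirroring the hunk above:

    // SelectionDAG::getExtLoad call shape after the change (sketch)
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 LD->isInvariant(), /*Alignment=*/2);
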
@@ -504,7 +509,9 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
StoreSDNode *ST = cast<StoreSDNode>(Op);
assert(!ST->isTruncatingStore() && "Unexpected store type");
assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
- if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
+ ST->getAddressSpace(),
+ ST->getAlignment())) {
return SDValue();
}
unsigned ABIAlignment = getDataLayout()->
@@ -800,7 +807,8 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
- const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *RegInfo =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
RegInfo->getFrameRegister(MF), MVT::i32);
}
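
getRegisterInfo() left TargetMachine for TargetSubtargetInfo, which is
why this lookup (and the getInstrInfo() one later in the diff) now
detours through getSubtargetImpl(). A partial sketch of the 3.6-era
subtarget interface these calls rely on:

    // TargetSubtargetInfo (partial sketch)
    class TargetSubtargetInfo {
    public:
      virtual const TargetInstrInfo *getInstrInfo() const;
      virtual const TargetRegisterInfo *getRegisterInfo() const;
      virtual const DataLayout *getDataLayout() const;
      // ... frame lowering, selection DAG info, etc.
    };
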
@@ -846,7 +854,8 @@ LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
// Absolute SP = (FP + FrameToArgs) + Offset
- const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
+ const TargetRegisterInfo *RegInfo =
+ getTargetMachine().getSubtargetImpl()->getRegisterInfo();
SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
RegInfo->getFrameRegister(MF), MVT::i32);
SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
@@ -969,7 +978,7 @@ LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
N->getBasePtr(), N->getPointerInfo(),
N->isVolatile(), N->isNonTemporal(),
N->isInvariant(), N->getAlignment(),
- N->getTBAAInfo(), N->getRanges());
+ N->getAAInfo(), N->getRanges());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlignment() < 2)
@@ -977,13 +986,13 @@ LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getTBAAInfo());
+ N->isInvariant(), N->getAlignment(), N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getTBAAInfo());
+ N->isInvariant(), N->getAlignment(), N->getAAInfo());
return SDValue();
}
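
getTBAAInfo() returned only the !tbaa node; its replacement getAAInfo()
returns an AAMDNodes bundle that also carries alias-scope and noalias
metadata, so the rename widens what alias information survives
lowering. The bundle, sketched from the 3.6-era definition:

    // llvm/IR/Metadata.h (sketch)
    struct AAMDNodes {
      MDNode *TBAA;    // !tbaa
      MDNode *Scope;   // !alias.scope
      MDNode *NoAlias; // !noalias
    };
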
@@ -999,7 +1008,7 @@ LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(),
N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getTBAAInfo());
+ N->getAlignment(), N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlignment() < 2)
@@ -1007,13 +1016,13 @@ LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getTBAAInfo());
+ N->getAlignment(), N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
N->isVolatile(), N->isNonTemporal(),
- N->getAlignment(), N->getTBAAInfo());
+ N->getAlignment(), N->getAAInfo());
return SDValue();
}
@@ -1118,8 +1127,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
// The ABI dictates there should be one stack slot available to the callee
// on function entry (for saving lr).
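
CCState stopped taking a TargetMachine because everything it needs is
reachable from the MachineFunction; that drop shortens every
construction site in this file, including the four below. The 3.6-era
constructor, sketched:

    // CodeGen/CallingConvLower.h (sketch)
    CCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
            SmallVectorImpl<CCValAssign> &Locs, LLVMContext &C);
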
@@ -1129,8 +1138,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> RVLocs;
// Analyze return values to determine the number of bytes of stack required.
- CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
@@ -1284,8 +1293,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);
@@ -1443,7 +1452,7 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+ CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
return false;
if (CCInfo.getNextStackOffset() != 0 && isVarArg)
@@ -1467,8 +1476,8 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
SmallVector<CCValAssign, 16> RVLocs;
// CCState - Info about the registers and stack slot.
- CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
// Analyze return values.
if (!isVarArg)
@@ -1541,7 +1550,8 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
- const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
+ const TargetInstrInfo &TII =
+ *getTargetMachine().getSubtargetImpl()->getInstrInfo();
DebugLoc dl = MI->getDebugLoc();
assert((MI->getOpcode() == XCore::SELECT_CC) &&
"Unexpected instr type to insert");
@@ -1803,7 +1813,9 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// Replace unaligned store of unaligned load with memmove.
StoreSDNode *ST = cast<StoreSDNode>(N);
if (!DCI.isBeforeLegalize() ||
- allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
+ allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
+ ST->getAddressSpace(),
+ ST->getAlignment()) ||
ST->isVolatile() || ST->isIndexed()) {
break;
}
@@ -1912,7 +1924,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
- const DataLayout *TD = TM.getDataLayout();
+ const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
unsigned Size = TD->getTypeAllocSize(Ty);
if (AM.BaseGV) {
return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
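
This last hunk is the same subtarget migration applied to DataLayout:
in this release window the layout was fetched through the subtarget
rather than the TargetMachine (it moved again in later releases). Usage
sketch, with Ty standing for the queried IR type as in the surrounding
code:

    // Sketch: type-size query via the subtarget's DataLayout (3.6-era)
    const DataLayout *TD = TM.getSubtargetImpl()->getDataLayout();
    unsigned Size = TD->getTypeAllocSize(Ty); // allocation size in bytes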