Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 101
1 file changed, 68 insertions(+), 33 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index eb1e51341ec4..270134d84c61 100644
--- a/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -164,6 +164,8 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
++I) {
MVT VT = MVT::SimpleValueType(I);
if (isTypeLegal(VT)) {
+ setOperationAction(ISD::ABS, VT, Legal);
+
// Expand individual DIV and REMs into DIVREMs.
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
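These SDIV/UDIV Expand settings rely on SystemZ's combined divide/remainder instructions: legalization rewrites a lone division into a DIVREM node and keeps only the quotient. A minimal sketch of that rewrite (illustrative helper, not code from this patch), assuming the usual SelectionDAG context:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue lowerDivViaDivRem(SelectionDAG &DAG, const SDLoc &DL,
                                 EVT VT, SDValue LHS, SDValue RHS) {
  SDVTList VTs = DAG.getVTList(VT, VT); // {quotient, remainder}
  SDValue DivRem = DAG.getNode(ISD::SDIVREM, DL, VTs, LHS, RHS);
  return DivRem.getValue(0); // keep the quotient, drop the remainder
}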
@@ -283,10 +285,13 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
// Give LowerOperation the chance to replace 64-bit ORs with subregs.
setOperationAction(ISD::OR, MVT::i64, Custom);
- // FIXME: Can we support these natively?
+ // Expand 128-bit shifts without using a libcall.
setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+ setLibcallName(RTLIB::SRL_I128, nullptr);
+ setLibcallName(RTLIB::SHL_I128, nullptr);
+ setLibcallName(RTLIB::SRA_I128, nullptr);
// We have native instructions for i8, i16 and i32 extensions, but not i1.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
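Clearing the RTLIB names forces the legalizer to open-code 128-bit shifts from 64-bit halves instead of emitting calls such as __lshrti3. The scalar arithmetic it must reproduce, as a standalone sketch (plain C++, not code from this patch):

#include <cstdint>

// Logical right shift of a 128-bit value held as two 64-bit halves.
static void srl128(uint64_t Hi, uint64_t Lo, unsigned Amt,
                   uint64_t &OutHi, uint64_t &OutLo) {
  if (Amt == 0) {
    OutHi = Hi;
    OutLo = Lo;
  } else if (Amt < 64) {
    OutLo = (Lo >> Amt) | (Hi << (64 - Amt));
    OutHi = Hi >> Amt;
  } else { // 64 <= Amt < 128
    OutLo = Hi >> (Amt - 64);
    OutHi = 0;
  }
}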
@@ -358,6 +363,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SUB, VT, Legal);
if (VT != MVT::v2i64)
setOperationAction(ISD::MUL, VT, Legal);
+ setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::AND, VT, Legal);
setOperationAction(ISD::OR, VT, Legal);
setOperationAction(ISD::XOR, VT, Legal);
@@ -784,10 +790,11 @@ bool SystemZVectorConstantInfo::isVectorConstantLegal(
SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
-
- // Find the smallest splat.
SplatBits = FPImm.bitcastToAPInt();
unsigned Width = SplatBits.getBitWidth();
+ IntBits <<= (SystemZ::VectorBits - Width);
+
+ // Find the smallest splat.
while (Width > 8) {
unsigned HalfSize = Width / 2;
APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
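The relocated "smallest splat" loop halves the candidate width while both halves agree. The same reduction as a standalone sketch (illustrative, mirroring the loop above; assumes LLVM's APInt):

#include "llvm/ADT/APInt.h"
using llvm::APInt;

static APInt smallestSplat(APInt V) {
  unsigned Width = V.getBitWidth();
  while (Width > 8) {
    unsigned HalfSize = Width / 2;
    APInt HighValue = V.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = V.trunc(HalfSize);
    if (HighValue != LowValue)
      break; // halves differ: no narrower splat exists
    V = LowValue;
    Width = HalfSize;
  }
  return V;
}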
@@ -981,16 +988,16 @@ bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
return false;
- unsigned FromBits = FromType->getPrimitiveSizeInBits();
- unsigned ToBits = ToType->getPrimitiveSizeInBits();
+ unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();
+ unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize();
return FromBits > ToBits;
}
bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
if (!FromVT.isInteger() || !ToVT.isInteger())
return false;
- unsigned FromBits = FromVT.getSizeInBits();
- unsigned ToBits = ToVT.getSizeInBits();
+ unsigned FromBits = FromVT.getFixedSizeInBits();
+ unsigned ToBits = ToVT.getFixedSizeInBits();
return FromBits > ToBits;
}
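The .getFixedSize() changes here (and in the adjustICmpTruncate hunk below) reflect that these size queries now return a TypeSize, which can also describe scalable-vector sizes; getFixedSize() asserts the size is fixed and yields a plain integer. A minimal sketch of the pattern (illustrative, assumes LLVM's TypeSize):

#include "llvm/Support/TypeSize.h"
using llvm::TypeSize;

static bool isWiderFixed(TypeSize From, TypeSize To) {
  // getFixedSize() asserts !isScalable() and returns uint64_t.
  return From.getFixedSize() > To.getFixedSize();
}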
@@ -1543,6 +1550,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
bool IsVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
EVT PtrVT = getPointerTy(MF.getDataLayout());
+ LLVMContext &Ctx = *DAG.getContext();
// Detect unsupported vector argument and return types.
if (Subtarget.hasVector()) {
@@ -1552,7 +1560,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+ SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// We don't support GuaranteedTailCallOpt, only automatically-detected
@@ -1577,14 +1585,25 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (VA.getLocInfo() == CCValAssign::Indirect) {
// Store the argument in a stack slot and pass its address.
- SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
+ unsigned ArgIndex = Outs[I].OrigArgIndex;
+ EVT SlotVT;
+ if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
+ // Allocate the full stack space for a promoted (and split) argument.
+ Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
+ EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
+ MVT PartVT = getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
+ unsigned N = getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
+ SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
+ } else {
+ SlotVT = Outs[I].ArgVT;
+ }
+ SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
MemOpChains.push_back(
DAG.getStore(Chain, DL, ArgValue, SpillSlot,
MachinePointerInfo::getFixedStack(MF, FI)));
// If the original argument was split (e.g. i128), we need
// to store all parts of it here (and pass just one address).
- unsigned ArgIndex = Outs[I].OrigArgIndex;
assert (Outs[I].PartOffset == 0);
while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
SDValue PartValue = OutVals[I + 1];
@@ -1594,6 +1613,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
MemOpChains.push_back(
DAG.getStore(Chain, DL, PartValue, Address,
MachinePointerInfo::getFixedStack(MF, FI)));
+ assert((PartOffset + PartValue.getValueType().getStoreSize() <=
+ SlotVT.getStoreSize()) && "Not enough space for argument part!");
++I;
}
ArgValue = SpillSlot;
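A worked example of the SlotVT computation above (values assumed for illustration): an i128 argument passed indirectly on SystemZ is split into N = 2 parts of PartVT = i64, so the spill slot must be sized as the full i128 rather than as the i64 ArgVT of the first part. The core arithmetic in isolation:

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

static EVT fullSlotVT(LLVMContext &Ctx, MVT PartVT, unsigned NumParts) {
  // e.g. two i64 parts -> a 128-bit integer spill slot
  return EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * NumParts);
}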
@@ -1687,7 +1708,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
@@ -2285,7 +2306,8 @@ static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
C.Op1.getOpcode() == ISD::Constant &&
cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
- if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
+ if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <=
+ C.Op0.getValueSizeInBits().getFixedSize()) {
unsigned Type = L->getExtensionType();
if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
(Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
@@ -2958,7 +2980,7 @@ static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
bool IsNegative) {
- Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
+ Op = DAG.getNode(ISD::ABS, DL, Op.getValueType(), Op);
if (IsNegative)
Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
DAG.getConstant(0, DL, Op.getValueType()), Op);
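The IsNegative path builds -|x| by subtracting the absolute value from zero. A scalar analogue of the two nodes (plain C++, illustrative only; like the DAG form, it wraps for the minimum value):

#include <cstdint>

static int64_t negAbs(int64_t X) {
  int64_t A = X < 0 ? -X : X; // ISD::ABS
  return 0 - A;               // ISD::SUB(0, abs) when IsNegative
}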
@@ -3414,14 +3436,14 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
// If the user has set the no-alignment function attribute, ignore
// alloca alignments.
- uint64_t AlignVal = (RealignOpt ?
- dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
+ uint64_t AlignVal =
+ (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
uint64_t StackAlign = TFI->getStackAlignment();
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
- unsigned SPReg = getStackPointerRegisterToSaveRestore();
+ Register SPReg = getStackPointerRegisterToSaveRestore();
SDValue NeededSpace = Size;
// Get a reference to the stack pointer.
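The alignment bookkeeping above in isolation (an illustrative restatement, not new logic): when dynamic realignment is requested, the allocation is padded so the result can be aligned by hand afterwards.

#include <algorithm>
#include <cstdint>

static uint64_t extraAlignSpace(uint64_t AllocaAlign, uint64_t StackAlign) {
  uint64_t Required = std::max(AllocaAlign, StackAlign);
  return Required - StackAlign; // 0 when the stack alignment suffices
}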
@@ -3430,7 +3452,8 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
// If we need a backchain, save it now.
SDValue Backchain;
if (StoreBackchain)
- Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
+ Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
+ MachinePointerInfo());
// Add extra space for alignment if needed.
if (ExtraAlignSpace)
@@ -3467,7 +3490,8 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
}
if (StoreBackchain)
- Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
+ Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
+ MachinePointerInfo());
SDValue Ops[2] = { Result, Chain };
return DAG.getMergeValues(Ops, DL);
@@ -4090,13 +4114,15 @@ SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
if (StoreBackchain) {
SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
- Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
+ Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
+ MachinePointerInfo());
}
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
if (StoreBackchain)
- Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
+ Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
+ MachinePointerInfo());
return Chain;
}
@@ -5557,7 +5583,6 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(TLS_LDCALL);
OPCODE(PCREL_WRAPPER);
OPCODE(PCREL_OFFSET);
- OPCODE(IABS);
OPCODE(ICMP);
OPCODE(FCMP);
OPCODE(STRICT_FCMP);
@@ -6815,8 +6840,7 @@ static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
KnownBits RHSKnown =
DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
- Known.Zero = LHSKnown.Zero & RHSKnown.Zero;
- Known.One = LHSKnown.One & RHSKnown.One;
+ Known = KnownBits::commonBits(LHSKnown, RHSKnown);
}
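KnownBits::commonBits keeps exactly the bit facts that hold for both inputs, which the replaced pair of lines spelled out by hand. A usage sketch (illustrative wrapper around the same call):

#include "llvm/Support/KnownBits.h"
using llvm::KnownBits;

static KnownBits mergeKnown(const KnownBits &L, const KnownBits &R) {
  // Equivalent to: Zero = L.Zero & R.Zero; One = L.One & R.One;
  return KnownBits::commonBits(L, R);
}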
void
@@ -7246,6 +7270,15 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
+ // ISel pattern matching also adds a load memory operand of the same
+ // address, so take special care to find the storing memory operand.
+ MachineMemOperand *MMO = nullptr;
+ for (auto *I : MI.memoperands())
+ if (I->isStore()) {
+ MMO = I;
+ break;
+ }
+
// Use STOCOpcode if possible. We could use different store patterns in
// order to avoid matching the index register, but the performance trade-offs
// might be more complicated in that case.
@@ -7253,15 +7286,6 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
if (Invert)
CCMask ^= CCValid;
- // ISel pattern matching also adds a load memory operand of the same
- // address, so take special care to find the storing memory operand.
- MachineMemOperand *MMO = nullptr;
- for (auto *I : MI.memoperands())
- if (I->isStore()) {
- MMO = I;
- break;
- }
-
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
.addReg(SrcReg)
.add(Base)
@@ -7306,7 +7330,8 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
.addReg(SrcReg)
.add(Base)
.addImm(Disp)
- .addReg(IndexReg);
+ .addReg(IndexReg)
+ .addMemOperand(MMO);
MBB->addSuccessor(JoinMBB);
MI.eraseFromParent();
@@ -8140,6 +8165,16 @@ MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
return DoneMBB;
}
+SDValue SystemZTargetLowering::
+getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ auto *TFL =
+ static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
+ SDLoc DL(SP);
+ return DAG.getNode(ISD::ADD, DL, MVT::i64, SP,
+ DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL));
+}
+
MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *MBB) const {
switch (MI.getOpcode()) {
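A usage sketch for the getBackchainAddress helper added above, mirroring the call sites changed earlier in this patch (assumes the Chain, DL and OldSP values from the lowerDYNAMIC_STACKALLOC hunk):

SDValue BCAddr = getBackchainAddress(OldSP, DAG);
SDValue Backchain = DAG.getLoad(MVT::i64, DL, Chain, BCAddr,
                                MachinePointerInfo());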