aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp')
-rw-r--r--contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp278
1 files changed, 146 insertions, 132 deletions
diff --git a/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 8534a0ad886e..88b226eaaccf 100644
--- a/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -40,6 +40,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
@@ -63,7 +64,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -94,18 +94,6 @@ static const MCPhysReg Mips64DPRegs[8] = {
Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
};
-// If I is a shifted mask, set the size (Size) and the first bit of the
-// mask (Pos), and return true.
-// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
-static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
- if (!isShiftedMask_64(I))
- return false;
-
- Size = countPopulation(I);
- Pos = countTrailingZeros(I);
- return true;
-}
-
// The MIPS MSA ABI passes vector arguments in the integer register set.
// The number of integer registers used is dependant on the ABI used.
MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
@@ -114,29 +102,37 @@ MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
if (!VT.isVector())
return getRegisterType(Context, VT);
- return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
- : MVT::i64;
+ if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
+ return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
+ : MVT::i64;
+ return getRegisterType(Context, VT.getVectorElementType());
}
unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
- if (VT.isVector())
- return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
+ if (VT.isVector()) {
+ if (VT.isPow2VectorType() && VT.getVectorElementType().isRound())
+ return divideCeil(VT.getSizeInBits(), Subtarget.isABI_O32() ? 32 : 64);
+ return VT.getVectorNumElements() *
+ getNumRegisters(Context, VT.getVectorElementType());
+ }
return MipsTargetLowering::getNumRegisters(Context, VT);
}
unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const {
- // Break down vector types to either 2 i64s or 4 i32s.
- RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
- IntermediateVT = RegisterVT;
- NumIntermediates =
- VT.getFixedSizeInBits() < RegisterVT.getFixedSizeInBits()
- ? VT.getVectorNumElements()
- : divideCeil(VT.getSizeInBits(), RegisterVT.getSizeInBits());
- return NumIntermediates;
+ if (VT.isPow2VectorType()) {
+ IntermediateVT = getRegisterTypeForCallingConv(Context, CC, VT);
+ RegisterVT = IntermediateVT.getSimpleVT();
+ NumIntermediates = getNumRegistersForCallingConv(Context, CC, VT);
+ return NumIntermediates;
+ }
+ IntermediateVT = VT.getVectorElementType();
+ NumIntermediates = VT.getVectorNumElements();
+ RegisterVT = getRegisterType(Context, IntermediateVT);
+ return NumIntermediates * getNumRegisters(Context, IntermediateVT);
}
SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
@@ -192,6 +188,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::Ret: return "MipsISD::Ret";
case MipsISD::ERet: return "MipsISD::ERet";
case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
+ case MipsISD::FAbs: return "MipsISD::FAbs";
case MipsISD::FMS: return "MipsISD::FMS";
case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
case MipsISD::FPCmp: return "MipsISD::FPCmp";
@@ -353,15 +350,12 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::SETCC, MVT::f32, Custom);
setOperationAction(ISD::SETCC, MVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
+ setOperationAction(ISD::FABS, MVT::f32, Custom);
+ setOperationAction(ISD::FABS, MVT::f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- if (!(TM.Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())) {
- setOperationAction(ISD::FABS, MVT::f32, Custom);
- setOperationAction(ISD::FABS, MVT::f64, Custom);
- }
-
if (Subtarget.isGP64bit()) {
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
@@ -494,15 +488,8 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::TRAP, MVT::Other, Legal);
- setTargetDAGCombine(ISD::SDIVREM);
- setTargetDAGCombine(ISD::UDIVREM);
- setTargetDAGCombine(ISD::SELECT);
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::OR);
- setTargetDAGCombine(ISD::ADD);
- setTargetDAGCombine(ISD::SUB);
- setTargetDAGCombine(ISD::AssertZext);
- setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
+ ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL});
if (ABI.IsO32()) {
// These libcalls are not available in 32-bit.
@@ -514,6 +501,11 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setLibcallName(RTLIB::MULO_I128, nullptr);
}
+ if (Subtarget.isGP64bit())
+ setMaxAtomicSizeInBitsSupported(64);
+ else
+ setMaxAtomicSizeInBitsSupported(32);
+
setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));
// The arguments on the stack are defined in terms of 4-byte slots on O32
@@ -794,14 +786,15 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
EVT ValTy = N->getValueType(0);
SDLoc DL(N);
- uint64_t Pos = 0, SMPos, SMSize;
+ uint64_t Pos = 0;
+ unsigned SMPos, SMSize;
ConstantSDNode *CN;
SDValue NewOperand;
unsigned Opc;
// Op's second operand must be a shifted mask.
if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
- !isShiftedMask(CN->getZExtValue(), SMPos, SMSize))
+ !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
return SDValue();
if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
@@ -875,7 +868,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
- uint64_t SMPos0, SMSize0, SMPos1, SMSize1;
+ unsigned SMPos0, SMSize0, SMPos1, SMSize1;
ConstantSDNode *CN, *CN1;
// See if Op's first operand matches (and $src1 , mask0).
@@ -883,7 +876,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
- !isShiftedMask(~CN->getSExtValue(), SMPos0, SMSize0))
+ !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0))
return SDValue();
// See if Op's second operand matches (and (shl $src, pos), mask1).
@@ -891,7 +884,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
And1.getOperand(0).getOpcode() == ISD::SHL) {
if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
- !isShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
+ !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1))
return SDValue();
// The shift masks must have the same position and size.
@@ -970,6 +963,14 @@ static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
return SDValue();
+ // In the case where we have a multiplication as the left operand
+ // of a subtraction, we can't combine into a MipsISD::MSub node as
+ // the instruction definition of msub(u) places the multiplication
+ // on the right.
+ if (ROOTNode->getOpcode() == ISD::SUB &&
+ ROOTNode->getOperand(0).getOpcode() == ISD::MUL)
+ return SDValue();
+
// We don't handle vector types here.
if (ROOTNode->getValueType(0).isVector())
return SDValue();
@@ -1027,16 +1028,11 @@ static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
// Initialize accumulator.
SDLoc DL(ROOTNode);
- SDValue TopHalf;
- SDValue BottomHalf;
- BottomHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
- CurDAG.getIntPtrConstant(0, DL));
-
- TopHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
- CurDAG.getIntPtrConstant(1, DL));
- SDValue ACCIn = CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
- BottomHalf,
- TopHalf);
+ SDValue BottomHalf, TopHalf;
+ std::tie(BottomHalf, TopHalf) =
+ CurDAG.SplitScalar(AddOperand, DL, MVT::i32, MVT::i32);
+ SDValue ACCIn =
+ CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, BottomHalf, TopHalf);
// Create MipsMAdd(u) / MipsMSub(u) node.
bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
@@ -1118,7 +1114,8 @@ static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
EVT ValTy = N->getValueType(0);
SDLoc DL(N);
- uint64_t Pos = 0, SMPos, SMSize;
+ uint64_t Pos = 0;
+ unsigned SMPos, SMSize;
ConstantSDNode *CN;
SDValue NewOperand;
@@ -1136,7 +1133,7 @@ static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
// AND's second operand must be a shifted mask.
if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
- !isShiftedMask(CN->getZExtValue(), SMPos, SMSize))
+ !isShiftedMask_64(CN->getZExtValue(), SMPos, SMSize))
return SDValue();
// Return if the shifted mask does not start at bit 0 or the sum of its size
@@ -1183,16 +1180,32 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
return SDValue();
}
-bool MipsTargetLowering::isCheapToSpeculateCttz() const {
+bool MipsTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
return Subtarget.hasMips32();
}
-bool MipsTargetLowering::isCheapToSpeculateCtlz() const {
+bool MipsTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
return Subtarget.hasMips32();
}
+bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
+ // We can use ANDI+SLTIU as a bit test. Y contains the bit position.
+ // For MIPSR2 or later, we may be able to use the `ext` instruction or its
+ // double-word variants.
+ if (auto *C = dyn_cast<ConstantSDNode>(Y))
+ return C->getAPIntValue().ule(15);
+
+ return false;
+}
+
bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
const SDNode *N, CombineLevel Level) const {
+ assert(((N->getOpcode() == ISD::SHL &&
+ N->getOperand(0).getOpcode() == ISD::SRL) ||
+ (N->getOpcode() == ISD::SRL &&
+ N->getOperand(0).getOpcode() == ISD::SHL)) &&
+ "Expected shift-shift mask");
+
if (N->getOperand(0).getValueType().isVector())
return false;
return true;
@@ -2029,8 +2042,7 @@ SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
return Op;
SDValue CCNode = CondRes.getOperand(2);
- Mips::CondCode CC =
- (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
+ Mips::CondCode CC = (Mips::CondCode)CCNode->getAsZExtVal();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
@@ -2421,11 +2433,14 @@ MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
}
-static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
- bool HasExtractInsert) {
+SDValue MipsTargetLowering::lowerFABS32(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) const {
SDLoc DL(Op);
SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
+ if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
+ return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
+
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
// to i32.
SDValue X = (Op.getValueType() == MVT::f32)
@@ -2458,11 +2473,14 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
-static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG,
- bool HasExtractInsert) {
+SDValue MipsTargetLowering::lowerFABS64(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) const {
SDLoc DL(Op);
SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
+ if (DAG.getTarget().Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())
+ return DAG.getNode(MipsISD::FAbs, DL, Op.getValueType(), Op.getOperand(0));
+
// Bitcast to integer node.
SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
@@ -2489,7 +2507,7 @@ SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
// check the depth
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+ if (Op.getConstantOperandVal(0) != 0) {
DAG.getContext()->emitError(
"return address can be determined only for current frame");
return SDValue();
@@ -2510,7 +2528,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
return SDValue();
// check the depth
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+ if (Op.getConstantOperandVal(0) != 0) {
DAG.getContext()->emitError(
"return address can be determined only for current frame");
return SDValue();
@@ -2574,12 +2592,13 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SDValue Shamt = Op.getOperand(2);
// if shamt < (VT.bits):
// lo = (shl lo, shamt)
- // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
+ // hi = (or (shl hi, shamt) (srl (srl lo, 1), (xor shamt, (VT.bits-1))))
// else:
// lo = 0
// hi = (shl lo, shamt[4:0])
- SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
- DAG.getConstant(-1, DL, MVT::i32));
+ SDValue Not =
+ DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
+ DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
DAG.getConstant(1, DL, VT));
SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
@@ -2604,7 +2623,7 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
// if shamt < (VT.bits):
- // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
+ // lo = (or (shl (shl hi, 1), (xor shamt, (VT.bits-1))) (srl lo, shamt))
// if isSRA:
// hi = (sra hi, shamt)
// else:
@@ -2616,8 +2635,9 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
// else:
// lo = (srl hi, shamt[4:0])
// hi = 0
- SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
- DAG.getConstant(-1, DL, MVT::i32));
+ SDValue Not =
+ DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
+ DAG.getConstant(VT.getSizeInBits() - 1, DL, MVT::i32));
SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
DAG.getConstant(1, DL, VT));
SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
@@ -2673,7 +2693,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
return Op;
// Return if load is aligned or if MemVT is neither i32 nor i64.
- if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
+ if ((LD->getAlign().value() >= (MemVT.getSizeInBits() / 8)) ||
((MemVT != MVT::i32) && (MemVT != MVT::i64)))
return SDValue();
@@ -2787,7 +2807,7 @@ static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
Val.getOperand(0));
return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
- SD->getPointerInfo(), SD->getAlignment(),
+ SD->getPointerInfo(), SD->getAlign(),
SD->getMemOperand()->getFlags());
}
@@ -2797,7 +2817,7 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// Lower unaligned integer stores.
if (!Subtarget.systemSupportsUnalignedAccess() &&
- (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+ (SD->getAlign().value() < (MemVT.getSizeInBits() / 8)) &&
((MemVT == MVT::i32) || (MemVT == MVT::i64)))
return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
@@ -2929,8 +2949,6 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
Reg = State.AllocateReg(IntRegs);
LocVT = MVT::i32;
} else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
- LocVT = MVT::i32;
-
// Allocate int register and shadow next int register. If first
// available register is Mips::A1 or Mips::A3, shadow it too.
Reg = State.AllocateReg(IntRegs);
@@ -2938,6 +2956,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
Reg = State.AllocateReg(IntRegs);
if (Reg) {
+ LocVT = MVT::i32;
+
State.addLoc(
CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
MCRegister HiReg = State.AllocateReg(IntRegs);
@@ -3047,13 +3067,13 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
- // The InFlag in necessary since all emitted instructions must be
+ // The InGlue is necessary since all emitted instructions must be
// stuck together.
- SDValue InFlag;
+ SDValue InGlue;
for (auto &R : RegsToPass) {
- Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InFlag);
- InFlag = Chain.getValue(1);
+ Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, R.first, R.second, InGlue);
+ InGlue = Chain.getValue(1);
}
// Add argument registers to the end of the list so that they are
@@ -3077,8 +3097,8 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
}
Ops.push_back(CLI.DAG.getRegisterMask(Mask));
- if (InFlag.getNode())
- Ops.push_back(InFlag);
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
}
void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
@@ -3208,7 +3228,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
ES ? ES->getSymbol() : nullptr);
// Get a count of how many bytes are to be pushed on the stack.
- unsigned NextStackOffset = CCInfo.getNextStackOffset();
+ unsigned StackSize = CCInfo.getStackSize();
// Call site info for function parameters tracking.
MachineFunction::CallSiteInfo CSInfo;
@@ -3218,8 +3238,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool InternalLinkage = false;
if (IsTailCall) {
IsTailCall = isEligibleForTailCallOptimization(
- CCInfo, NextStackOffset, *MF.getInfo<MipsFunctionInfo>());
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
InternalLinkage = G->getGlobal()->hasInternalLinkage();
IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
G->getGlobal()->hasPrivateLinkage() ||
@@ -3238,11 +3258,10 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// ByValChain is the output chain of the last Memcpy node created for copying
// byval arguments to the stack.
unsigned StackAlignment = TFL->getStackAlignment();
- NextStackOffset = alignTo(NextStackOffset, StackAlignment);
- SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
+ StackSize = alignTo(StackSize, StackAlignment);
if (!(IsTailCall || MemcpyInByVal))
- Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);
+ Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
SDValue StackPtr =
DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
@@ -3312,19 +3331,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::AExt:
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
break;
@@ -3463,28 +3482,27 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
- SDValue InFlag = Chain.getValue(1);
+ SDValue InGlue = Chain.getValue(1);
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
// Create the CALLSEQ_END node in the case of where it is not a call to
// memcpy.
if (!(MemcpyInByVal)) {
- Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
- DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
- InFlag = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
+ InGlue = Chain.getValue(1);
}
// Handle result values, copying them out of physregs into vregs that we
// return.
- return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+ return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, DL, DAG,
InVals, CLI);
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue MipsTargetLowering::LowerCallResult(
- SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
+ SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
TargetLowering::CallLoweringInfo &CLI) const {
@@ -3504,9 +3522,9 @@ SDValue MipsTargetLowering::LowerCallResult(
assert(VA.isRegLoc() && "Can only return in registers!");
SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
- RVLocs[i].getLocVT(), InFlag);
+ RVLocs[i].getLocVT(), InGlue);
Chain = Val.getValue(1);
- InFlag = Val.getValue(2);
+ InGlue = Val.getValue(2);
if (VA.isUpperBitsInLoc()) {
unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
@@ -3636,7 +3654,7 @@ SDValue MipsTargetLowering::LowerFormalArguments(
"Functions with the interrupt attribute cannot have arguments!");
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
- MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
+ MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
CCInfo.getInRegsParamsCount() > 0);
unsigned CurArgIdx = 0;
@@ -3706,15 +3724,6 @@ SDValue MipsTargetLowering::LowerFormalArguments(
assert(!VA.needsCustom() && "unexpected custom memory argument");
- if (ABI.IsO32()) {
- // We ought to be able to use LocVT directly but O32 sets it to i32
- // when allocating floating point values to integer registers.
- // This shouldn't influence how we load the value into registers unless
- // we are targeting softfloat.
- if (VA.getValVT().isFloatingPoint() && !Subtarget.useSoftFloat())
- LocVT = VA.getValVT();
- }
-
// Only arguments pased on the stack should make it here.
assert(VA.isMemLoc());
@@ -3823,7 +3832,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
- SDValue Flag;
+ SDValue Glue;
SmallVector<SDValue, 4> RetOps(1, Chain);
// Copy the result values into the output registers.
@@ -3843,19 +3852,19 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
break;
case CCValAssign::AExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::AExt:
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::ZExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::ZExt:
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
break;
case CCValAssign::SExtUpper:
UseUpperBits = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CCValAssign::SExt:
Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
break;
@@ -3869,10 +3878,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
}
- Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
// Guarantee that all emitted copies are stuck together with flags.
- Flag = Chain.getValue(1);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
@@ -3890,16 +3899,16 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
- Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
- Flag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Glue);
+ Glue = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
}
RetOps[0] = Chain; // Update chain.
- // Add the flag if we have it.
- if (Flag.getNode())
- RetOps.push_back(Flag);
+ // Add the glue if we have it.
+ if (Glue.getNode())
+ RetOps.push_back(Glue);
// ISRs must use "eret".
if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
@@ -3974,7 +3983,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
break;
case 'f': // FPU or MSA register
if (Subtarget.hasMSA() && type->isVectorTy() &&
- type->getPrimitiveSizeInBits().getFixedSize() == 128)
+ type->getPrimitiveSizeInBits().getFixedValue() == 128)
weight = CW_Register;
else if (type->isFloatTy())
weight = CW_Register;
@@ -4029,7 +4038,7 @@ static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
ISD::NodeType) const {
bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
- EVT MinVT = getRegisterType(Context, Cond ? MVT::i64 : MVT::i32);
+ EVT MinVT = getRegisterType(Cond ? MVT::i64 : MVT::i32);
return VT.bitsLT(MinVT) ? MinVT : VT;
}
@@ -4054,7 +4063,7 @@ parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
RC = TRI->getRegClass(Prefix == "hi" ?
Mips::HI32RegClassID : Mips::LO32RegClassID);
return std::make_pair(*(RC->begin()), RC);
- } else if (Prefix.startswith("$msa")) {
+ } else if (Prefix.starts_with("$msa")) {
// Parse $msa(ir|csr|access|save|modify|request|map|unmap)
// No numeric characters follow the name.
@@ -4119,14 +4128,18 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
case 'y': // Same as 'r'. Exists for compatibility.
case 'r':
- if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 || VT == MVT::i1) {
+ if ((VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8 ||
+ VT == MVT::i1) ||
+ (VT == MVT::f32 && Subtarget.useSoftFloat())) {
if (Subtarget.inMips16Mode())
return std::make_pair(0U, &Mips::CPU16RegsRegClass);
return std::make_pair(0U, &Mips::GPR32RegClass);
}
- if (VT == MVT::i64 && !Subtarget.isGP64bit())
+ if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
+ !Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR32RegClass);
- if (VT == MVT::i64 && Subtarget.isGP64bit())
+ if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) &&
+ Subtarget.isGP64bit())
return std::make_pair(0U, &Mips::GPR64RegClass);
// This will generate an error message
return std::make_pair(0U, nullptr);
@@ -4181,14 +4194,15 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue>&Ops,
- SelectionDAG &DAG) const {
+ StringRef Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
SDLoc DL(Op);
SDValue Result;
// Only support length 1 constraints for now.
- if (Constraint.length() > 1) return;
+ if (Constraint.size() > 1)
+ return;
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
@@ -4498,7 +4512,7 @@ void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
int VaArgOffset;
if (ArgRegs.size() == Idx)
- VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
+ VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
else {
VaArgOffset =
(int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -