Diffstat (limited to 'llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  831
1 file changed, 579 insertions, 252 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 6638ff6a6358..d56d4bcc9169 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -436,14 +436,11 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
} else if (ValueVT.bitsLT(PartEVT)) {
- // Bitcast Val back the original type and extract the corresponding
- // vector we want.
- unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
- EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
- ValueVT.getVectorElementType(), Elts);
- Val = DAG.getBitcast(WiderVecType, Val);
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
- DAG.getVectorIdxConstant(0, DL));
+ const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
+ EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
+ // Drop the extra bits.
+ Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
+ return DAG.getBitcast(ValueVT, Val);
}
diagnosePossiblyInvalidConstraint(
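A minimal standalone sketch of what the new truncate-then-bitcast path above computes, assuming (purely for illustration) an i64 part carrying a v2i16 value in its low bits on a little-endian target:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Register part holding the value in its low 32 bits; the high bits are
  // the "extra bits" that the ISD::TRUNCATE step discards.
  uint64_t Part = 0xDEADBEEF00020001ULL;
  uint32_t Narrow = static_cast<uint32_t>(Part);   // ISD::TRUNCATE to iN
  uint16_t Elts[2];
  std::memcpy(Elts, &Narrow, sizeof(Elts));        // ISD::BITCAST iN -> <2 x i16>
  std::printf("elt0=%u elt1=%u\n", (unsigned)Elts[0], (unsigned)Elts[1]); // elt0=1 elt1=2
}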
@@ -610,30 +607,39 @@ static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
std::reverse(Parts, Parts + OrigNumParts);
}
-static SDValue widenVectorToPartType(SelectionDAG &DAG,
- SDValue Val, const SDLoc &DL, EVT PartVT) {
- if (!PartVT.isFixedLengthVector())
+static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
+ const SDLoc &DL, EVT PartVT) {
+ if (!PartVT.isVector())
return SDValue();
EVT ValueVT = Val.getValueType();
- unsigned PartNumElts = PartVT.getVectorNumElements();
- unsigned ValueNumElts = ValueVT.getVectorNumElements();
- if (PartNumElts > ValueNumElts &&
- PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
- EVT ElementVT = PartVT.getVectorElementType();
- // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
- // undef elements.
- SmallVector<SDValue, 16> Ops;
- DAG.ExtractVectorElements(Val, Ops);
- SDValue EltUndef = DAG.getUNDEF(ElementVT);
- for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
- Ops.push_back(EltUndef);
+ ElementCount PartNumElts = PartVT.getVectorElementCount();
+ ElementCount ValueNumElts = ValueVT.getVectorElementCount();
+
+ // We only support widening vectors with equivalent element types and
+ // fixed/scalable properties. If a target needs to widen a fixed-length type
+ // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
+ if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
+ PartNumElts.isScalable() != ValueNumElts.isScalable() ||
+ PartVT.getVectorElementType() != ValueVT.getVectorElementType())
+ return SDValue();
- // FIXME: Use CONCAT for 2x -> 4x.
- return DAG.getBuildVector(PartVT, DL, Ops);
- }
+ // Widening a scalable vector to another scalable vector is done by inserting
+ // the vector into a larger undef one.
+ if (PartNumElts.isScalable())
+ return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
+ Val, DAG.getVectorIdxConstant(0, DL));
- return SDValue();
+ EVT ElementVT = PartVT.getVectorElementType();
+ // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
+ // undef elements.
+ SmallVector<SDValue, 16> Ops;
+ DAG.ExtractVectorElements(Val, Ops);
+ SDValue EltUndef = DAG.getUNDEF(ElementVT);
+ Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
+
+ // FIXME: Use CONCAT for 2x -> 4x.
+ return DAG.getBuildVector(PartVT, DL, Ops);
}
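A small conceptual sketch of the fixed-length widening case handled above, with undef lanes modelled by std::optional (the element type and values are illustrative only):

#include <cstdio>
#include <optional>
#include <vector>

// Keep the original elements and append (PartNumElts - ValueNumElts)
// trailing "undef" lanes, mirroring the build_vector path above.
std::vector<std::optional<float>> widenToPart(const std::vector<float> &Val,
                                              unsigned PartNumElts) {
  std::vector<std::optional<float>> Ops(Val.begin(), Val.end());
  while (Ops.size() < PartNumElts)
    Ops.push_back(std::nullopt);
  return Ops;
}

int main() {
  // e.g. <2 x float> widened to a <4 x float> part type.
  for (const auto &E : widenToPart({1.0f, 2.0f}, 4)) {
    if (E)
      std::printf("%g ", *E);
    else
      std::printf("undef ");
  }
  std::printf("\n");
}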
/// getCopyToPartsVector - Create a series of nodes that contain the specified
@@ -714,13 +720,25 @@ static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
EVT BuiltVectorTy = EVT::getVectorVT(
*DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt.getValue());
- if (ValueVT != BuiltVectorTy) {
- if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
- Val = Widened;
+ if (ValueVT == BuiltVectorTy) {
+ // Nothing to do.
+ } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
+ // Bitconvert vector->vector case.
Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
+ } else if (SDValue Widened =
+ widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
+ Val = Widened;
+ } else if (BuiltVectorTy.getVectorElementType().bitsGE(
+ ValueVT.getVectorElementType()) &&
+ BuiltVectorTy.getVectorElementCount() ==
+ ValueVT.getVectorElementCount()) {
+ // Promoted vector extract
+ Val = DAG.getAnyExtOrTrunc(Val, DL, BuiltVectorTy);
}
+ assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
+
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
@@ -970,8 +988,9 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
}
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
- unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
MVT RegisterVT = RegVTs[Value];
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
+ RegisterVT);
for (unsigned i = 0; i != NumRegs; ++i) {
assert(Reg < Regs.size() && "Mismatch in # registers expected");
unsigned TheReg = Regs[Reg++];
@@ -1119,6 +1138,33 @@ void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
}
}
+void SelectionDAGBuilder::addDanglingDebugInfo(const DbgValueInst *DI,
+ DebugLoc DL, unsigned Order) {
+ // We treat variadic dbg_values differently at this stage.
+ if (DI->hasArgList()) {
+ // For variadic dbg_values we will now insert an undef.
+ // FIXME: We can potentially recover these!
+ SmallVector<SDDbgOperand, 2> Locs;
+ for (const Value *V : DI->getValues()) {
+ auto Undef = UndefValue::get(V->getType());
+ Locs.push_back(SDDbgOperand::fromConst(Undef));
+ }
+ SDDbgValue *SDV = DAG.getDbgValueList(
+ DI->getVariable(), DI->getExpression(), Locs, {},
+ /*IsIndirect=*/false, DL, Order, /*IsVariadic=*/true);
+ DAG.AddDbgValue(SDV, /*isParameter=*/false);
+ } else {
+ // TODO: Dangling debug info will eventually either be resolved or produce
+ // an Undef DBG_VALUE. However in the resolution case, a gap may appear
+ // between the original dbg.value location and its resolved DBG_VALUE,
+ // which we should ideally fill with an extra Undef DBG_VALUE.
+ assert(DI->getNumVariableLocationOps() == 1 &&
+ "DbgValueInst without an ArgList should have a single location "
+ "operand.");
+ DanglingDebugInfoMap[DI->getValue(0)].emplace_back(DI, DL, Order);
+ }
+}
+
void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
const DIExpression *Expr) {
auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
@@ -1156,6 +1202,7 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
for (auto &DDI : DDIV) {
const DbgValueInst *DI = DDI.getDI();
+ assert(!DI->hasArgList() && "Not implemented for variadic dbg_values");
assert(DI && "Ill-formed DanglingDebugInfo");
DebugLoc dl = DDI.getdl();
unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
@@ -1185,37 +1232,41 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
<< ValSDNodeOrder << "\n");
SDV = getDbgValue(Val, Variable, Expr, dl,
std::max(DbgSDNodeOrder, ValSDNodeOrder));
- DAG.AddDbgValue(SDV, Val.getNode(), false);
+ DAG.AddDbgValue(SDV, false);
} else
LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
<< "in EmitFuncArgumentDbgValue\n");
} else {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
- auto Undef =
- UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
+ auto Undef = UndefValue::get(DDI.getDI()->getValue(0)->getType());
auto SDV =
DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ DAG.AddDbgValue(SDV, false);
}
}
DDIV.clear();
}
void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
- Value *V = DDI.getDI()->getValue();
+ // TODO: For the variadic implementation, instead of only checking the fail
+ // state of `handleDebugValue`, we need to know specifically which values were
+ // invalid, so that we attempt to salvage only those values when processing
+ // a DIArgList.
+ assert(!DDI.getDI()->hasArgList() &&
+ "Not implemented for variadic dbg_values");
+ Value *V = DDI.getDI()->getValue(0);
DILocalVariable *Var = DDI.getDI()->getVariable();
DIExpression *Expr = DDI.getDI()->getExpression();
DebugLoc DL = DDI.getdl();
DebugLoc InstDL = DDI.getDI()->getDebugLoc();
unsigned SDOrder = DDI.getSDNodeOrder();
-
// Currently we consider only dbg.value intrinsics -- we tell the salvager
// that DW_OP_stack_value is desired.
assert(isa<DbgValueInst>(DDI.getDI()));
bool StackValue = true;
// Can this Value be encoded without any further work?
- if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
+ if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder, /*IsVariadic=*/false))
return;
// Attempt to salvage back through as many instructions as possible. Bail if
@@ -1223,20 +1274,27 @@ void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
// variable. FIXME: Further work could recover those too.
while (isa<Instruction>(V)) {
Instruction &VAsInst = *cast<Instruction>(V);
- DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);
+ // Temporary "0", awaiting real implementation.
+ SmallVector<Value *, 4> AdditionalValues;
+ DIExpression *SalvagedExpr =
+ salvageDebugInfoImpl(VAsInst, Expr, StackValue, 0, AdditionalValues);
// If we cannot salvage any further, and haven't yet found a suitable debug
// expression, bail out.
- if (!NewExpr)
+ // TODO: If AdditionalValues isn't empty, then the salvage can only be
+ // represented with a DBG_VALUE_LIST, so we give up. When we have support
+ // here for variadic dbg_values, remove that condition.
+ if (!SalvagedExpr || !AdditionalValues.empty())
break;
// New value and expr now represent this debuginfo.
V = VAsInst.getOperand(0);
- Expr = NewExpr;
+ Expr = SalvagedExpr;
// Some kind of simplification occurred: check whether the operand of the
// salvaged debug expression can be encoded in this DAG.
- if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
+ if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder,
+ /*IsVariadic=*/false)) {
LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "
<< DDI.getDI() << "\nBy stripping back to:\n " << V);
return;
@@ -1246,9 +1304,9 @@ void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
// This was the final opportunity to salvage this debug information, and it
// couldn't be done. Place an undef DBG_VALUE at this location to terminate
// any earlier variable location.
- auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
+ auto Undef = UndefValue::get(DDI.getDI()->getValue(0)->getType());
auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ DAG.AddDbgValue(SDV, false);
LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()
<< "\n");
@@ -1256,53 +1314,72 @@ void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
<< "\n");
}
-bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
+bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
+ DILocalVariable *Var,
DIExpression *Expr, DebugLoc dl,
- DebugLoc InstDL, unsigned Order) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- SDDbgValue *SDV;
- if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
- isa<ConstantPointerNull>(V)) {
- SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ DebugLoc InstDL, unsigned Order,
+ bool IsVariadic) {
+ if (Values.empty())
return true;
- }
+ SmallVector<SDDbgOperand> LocationOps;
+ SmallVector<SDNode *> Dependencies;
+ for (const Value *V : Values) {
+ // Constant value.
+ if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
+ isa<ConstantPointerNull>(V)) {
+ LocationOps.emplace_back(SDDbgOperand::fromConst(V));
+ continue;
+ }
- // If the Value is a frame index, we can create a FrameIndex debug value
- // without relying on the DAG at all.
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- auto SI = FuncInfo.StaticAllocaMap.find(AI);
- if (SI != FuncInfo.StaticAllocaMap.end()) {
- auto SDV =
- DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
- /*IsIndirect*/ false, dl, SDNodeOrder);
- // Do not attach the SDNodeDbgValue to an SDNode: this variable location
- // is still available even if the SDNode gets optimized out.
- DAG.AddDbgValue(SDV, nullptr, false);
- return true;
+ // If the Value is a frame index, we can create a FrameIndex debug value
+ // without relying on the DAG at all.
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ auto SI = FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
+ continue;
+ }
}
- }
- // Do not use getValue() in here; we don't want to generate code at
- // this point if it hasn't been done yet.
- SDValue N = NodeMap[V];
- if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
- N = UnusedArgNodeMap[V];
- if (N.getNode()) {
- if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
- return true;
- SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, N.getNode(), false);
- return true;
- }
+ // Do not use getValue() in here; we don't want to generate code at
+ // this point if it hasn't been done yet.
+ SDValue N = NodeMap[V];
+ if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
+ N = UnusedArgNodeMap[V];
+ if (N.getNode()) {
+ // Only emit func arg dbg value for non-variadic dbg.values for now.
+ if (!IsVariadic && EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
+ return true;
+ if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
+ // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
+ // describe stack slot locations.
+ //
+ // Consider "int x = 0; int *px = &x;". There are two kinds of
+ // interesting debug values here after optimization:
+ //
+ // dbg.value(i32* %px, !"int *px", !DIExpression()), and
+ // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
+ //
+ // Both describe the direct values of their associated variables.
+ Dependencies.push_back(N.getNode());
+ LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
+ continue;
+ }
+ LocationOps.emplace_back(
+ SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
+ continue;
+ }
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ // Special rules apply for the first dbg.values of parameter variables in a
+ // function. Identify them by the fact they reference Argument Values, that
+ // they're parameters, and they are parameters of the current function. We
+ // need to let them dangle until they get an SDNode.
+ bool IsParamOfFunc =
+ isa<Argument>(V) && Var->isParameter() && !InstDL.getInlinedAt();
+ if (IsParamOfFunc)
+ return false;
- // Special rules apply for the first dbg.values of parameter variables in a
- // function. Identify them by the fact they reference Argument Values, that
- // they're parameters, and they are parameters of the current function. We
- // need to let them dangle until they get an SDNode.
- bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
- !InstDL.getInlinedAt();
- if (!IsParamOfFunc) {
// The value is not used in this block yet (or it would have an SDNode).
// We still want the value to appear for the user if possible -- if it has
// an associated VReg, we can refer to that instead.
@@ -1314,6 +1391,9 @@ bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
V->getType(), None);
if (RFV.occupiesMultipleRegs()) {
+ // FIXME: We could potentially support variadic dbg_values here.
+ if (IsVariadic)
+ return false;
unsigned Offset = 0;
unsigned BitsToDescribe = 0;
if (auto VarSize = Var->getSizeInBits())
@@ -1321,31 +1401,41 @@ bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
if (auto Fragment = Expr->getFragmentInfo())
BitsToDescribe = Fragment->SizeInBits;
for (auto RegAndSize : RFV.getRegsAndSizes()) {
- unsigned RegisterSize = RegAndSize.second;
// Bail out if all bits are described already.
if (Offset >= BitsToDescribe)
break;
+ // TODO: handle scalable vectors.
+ unsigned RegisterSize = RegAndSize.second;
unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
- ? BitsToDescribe - Offset
- : RegisterSize;
+ ? BitsToDescribe - Offset
+ : RegisterSize;
auto FragmentExpr = DIExpression::createFragmentExpression(
Expr, Offset, FragmentSize);
if (!FragmentExpr)
- continue;
- SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
- false, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ continue;
+ SDDbgValue *SDV = DAG.getVRegDbgValue(
+ Var, *FragmentExpr, RegAndSize.first, false, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, false);
Offset += RegisterSize;
}
- } else {
- SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ return true;
}
- return true;
+ // We can use simple vreg locations for variadic dbg_values as well.
+ LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
+ continue;
}
+ // We failed to create a SDDbgOperand for V.
+ return false;
}
- return false;
+ // We have created a SDDbgOperand for each Value in Values.
+ // Should we use Order instead of SDNodeOrder?
+ assert(!LocationOps.empty());
+ SDDbgValue *SDV =
+ DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
+ /*IsIndirect=*/false, dl, SDNodeOrder, IsVariadic);
+ DAG.AddDbgValue(SDV, /*isParameter=*/false);
+ return true;
}
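A self-contained sketch of the per-register fragment loop in handleDebugValue above: a variable of a given bit size spread over several registers yields one (offset, size) fragment per register, with the last fragment clipped to the bits that remain (the register sizes below are just an example):

#include <cstdio>
#include <utility>
#include <vector>

std::vector<std::pair<unsigned, unsigned>> // (OffsetInBits, SizeInBits)
splitIntoFragments(unsigned BitsToDescribe, const std::vector<unsigned> &RegSizes) {
  std::vector<std::pair<unsigned, unsigned>> Fragments;
  unsigned Offset = 0;
  for (unsigned RegisterSize : RegSizes) {
    if (Offset >= BitsToDescribe)
      break; // all bits already described
    unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
                                ? BitsToDescribe - Offset
                                : RegisterSize;
    Fragments.push_back({Offset, FragmentSize});
    Offset += RegisterSize;
  }
  return Fragments;
}

int main() {
  // e.g. a 96-bit variable held in two 64-bit registers -> [0,64) and [64,96).
  for (auto [Off, Size] : splitIntoFragments(96, {64, 64}))
    std::printf("fragment at bit %u, %u bits\n", Off, Size);
}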
void SelectionDAGBuilder::resolveOrClearDbgInfo() {
@@ -1458,9 +1548,8 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
SmallVector<SDValue, 4> Constants;
- for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
- OI != OE; ++OI) {
- SDNode *Val = getValue(*OI).getNode();
+ for (const Use &U : C->operands()) {
+ SDNode *Val = getValue(U).getNode();
// If the operand is an empty aggregate, there are no values.
if (!Val) continue;
// Add each leaf value from the operand to the Constants list
@@ -1592,6 +1681,8 @@ void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
// Update machine-CFG edge.
MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
FuncInfo.MBB->addSuccessor(TargetMBB);
+ TargetMBB->setIsEHCatchretTarget(true);
+ DAG.getMachineFunction().setHasEHCatchret(true);
auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
bool IsSEH = isAsynchronousEHPersonality(Pers);
@@ -1851,7 +1942,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
I.getOperand(0)->getType(), F->getCallingConv(),
- /*IsVarArg*/ false);
+ /*IsVarArg*/ false, DL);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
@@ -1991,7 +2082,7 @@ bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
// If this is an argument, we can export it if the BB is the entry block or
// if it is already exported.
if (isa<Argument>(V)) {
- if (FromBB == &FromBB->getParent()->getEntryBlock())
+ if (FromBB->isEntryBlock())
return true;
// Otherwise, can only export this if it is already exported.
@@ -2782,23 +2873,27 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
// Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
// have to do anything here to lower funclet bundles.
- assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
- LLVMContext::OB_gc_transition,
- LLVMContext::OB_gc_live,
- LLVMContext::OB_funclet,
- LLVMContext::OB_cfguardtarget}) &&
+ assert(!I.hasOperandBundlesOtherThan(
+ {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
+ LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
+ LLVMContext::OB_cfguardtarget,
+ LLVMContext::OB_clang_arc_attachedcall}) &&
"Cannot lower invokes with arbitrary operand bundles yet!");
const Value *Callee(I.getCalledOperand());
const Function *Fn = dyn_cast<Function>(Callee);
if (isa<InlineAsm>(Callee))
- visitInlineAsm(I);
+ visitInlineAsm(I, EHPadBB);
else if (Fn && Fn->isIntrinsic()) {
switch (Fn->getIntrinsicID()) {
default:
llvm_unreachable("Cannot invoke this intrinsic");
case Intrinsic::donothing:
// Ignore invokes to @llvm.donothing: jump directly to the next BB.
+ case Intrinsic::seh_try_begin:
+ case Intrinsic::seh_scope_begin:
+ case Intrinsic::seh_try_end:
+ case Intrinsic::seh_scope_end:
break;
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
@@ -2829,7 +2924,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
// with deopt state.
LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
} else {
- LowerCallTo(I, getValue(Callee), false, EHPadBB);
+ LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
}
// If the value of the invoke is used outside of its defining block, make it
@@ -4273,7 +4368,7 @@ static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
Base = SDB->getValue(C);
- unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
+ ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
IndexType = ISD::SIGNED_SCALED;
@@ -4314,7 +4409,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
EVT VT = Src0.getValueType();
Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
->getMaybeAlignValue()
- .getValueOr(DAG.getEVTAlign(VT));
+ .getValueOr(DAG.getEVTAlign(VT.getScalarType()));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
AAMDNodes AAInfo;
@@ -4339,6 +4434,14 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
IndexType = ISD::SIGNED_UNSCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
+
+ EVT IdxVT = Index.getValueType();
+ EVT EltTy = IdxVT.getVectorElementType();
+ if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
+ EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
+ }
+
SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
Ops, MMO, IndexType, false);
@@ -4424,7 +4527,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
->getMaybeAlignValue()
- .getValueOr(DAG.getEVTAlign(VT));
+ .getValueOr(DAG.getEVTAlign(VT.getScalarType()));
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
@@ -4450,6 +4553,14 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
IndexType = ISD::SIGNED_UNSCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
+
+ EVT IdxVT = Index.getValueType();
+ EVT EltTy = IdxVT.getVectorElementType();
+ if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
+ EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
+ }
+
SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO, IndexType, ISD::NON_EXTLOAD);
@@ -4702,6 +4813,12 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
SDVTList VTs = DAG.getVTList(ValueVTs);
+ // Propagate fast-math-flags from IR to node(s).
+ SDNodeFlags Flags;
+ if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
+ Flags.copyFMF(*FPMO);
+ SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
+
// Create the node.
SDValue Result;
if (IsTgtIntrinsic) {
@@ -5377,6 +5494,8 @@ getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
+/// We don't currently support this for variadic dbg_values, as they shouldn't
+/// appear for function arguments or in the prologue.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
const Value *V, DILocalVariable *Variable, DIExpression *Expr,
DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
@@ -5384,6 +5503,35 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (!Arg)
return false;
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
+
+ // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
+ // we've been asked to pursue.
+ auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
+ bool Indirect) {
+ if (Reg.isVirtual() && TM.Options.ValueTrackingVariableLocations) {
+ // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
+ // pointing at the VReg, which will be patched up later.
+ auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
+ auto MIB = BuildMI(MF, DL, Inst);
+ MIB.addReg(Reg, RegState::Debug);
+ MIB.addImm(0);
+ MIB.addMetadata(Variable);
+ auto *NewDIExpr = FragExpr;
+ // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
+ // the DIExpression.
+ if (Indirect)
+ NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
+ MIB.addMetadata(NewDIExpr);
+ return MIB;
+ } else {
+ // Create a completely standard DBG_VALUE.
+ auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
+ return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
+ }
+ };
+
if (!IsDbgDeclare) {
// ArgDbgValues are hoisted to the beginning of the entry block. So we
// should only emit as ArgDbgValue if the dbg.value intrinsic is found in
@@ -5449,9 +5597,6 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
}
}
- MachineFunction &MF = DAG.getMachineFunction();
- const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
-
bool IsIndirect = false;
Optional<MachineOperand> Op;
// Some arguments' frame index is recorded during argument lowering.
@@ -5518,13 +5663,12 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
if (!FragmentExpr) {
SDDbgValue *SDV = DAG.getConstantDbgValue(
Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
- DAG.AddDbgValue(SDV, nullptr, false);
+ DAG.AddDbgValue(SDV, false);
continue;
}
- assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
- FuncInfo.ArgDbgValues.push_back(
- BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
- RegAndSize.first, Variable, *FragmentExpr));
+ MachineInstr *NewMI =
+ MakeVRegDbgValue(RegAndSize.first, *FragmentExpr, IsDbgDeclare);
+ FuncInfo.ArgDbgValues.push_back(NewMI);
}
};
@@ -5555,11 +5699,15 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
assert(Variable->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
- IsIndirect = (Op->isReg()) ? IsIndirect : true;
- FuncInfo.ArgDbgValues.push_back(
- BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
- *Op, Variable, Expr));
+ MachineInstr *NewMI = nullptr;
+ if (Op->isReg())
+ NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
+ else
+ NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
+ Variable, Expr);
+
+ FuncInfo.ArgDbgValues.push_back(NewMI);
return true;
}
@@ -5616,7 +5764,7 @@ void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
SDValue Callee = DAG.getExternalSymbol(
FunctionName,
DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
- LowerCallTo(I, Callee, I.isTailCall());
+ LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
}
/// Given a @llvm.call.preallocated.setup, return the corresponding
@@ -5718,10 +5866,12 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// FIXME: Support passing different dest/src alignments to the memcpy DAG
// node.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
+ AAMDNodes AAInfo;
+ I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
/* AlwaysInline */ false, isTC,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)));
+ MachinePointerInfo(I.getArgOperand(1)), AAInfo);
updateDAGForMaybeTailCall(MC);
return;
}
@@ -5739,10 +5889,12 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
// FIXME: Support passing different dest/src alignments to the memcpy DAG
// node.
+ AAMDNodes AAInfo;
+ I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
/* AlwaysInline */ true, isTC,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)));
+ MachinePointerInfo(I.getArgOperand(1)), AAInfo);
updateDAGForMaybeTailCall(MC);
return;
}
@@ -5756,8 +5908,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isVol = MSI.isVolatile();
bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
SDValue Root = isVol ? getRoot() : getMemoryRoot();
+ AAMDNodes AAInfo;
+ I.getAAMetadata(AAInfo);
SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
- MachinePointerInfo(I.getArgOperand(0)));
+ MachinePointerInfo(I.getArgOperand(0)), AAInfo);
updateDAGForMaybeTailCall(MS);
return;
}
@@ -5775,9 +5929,11 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// FIXME: Support passing different dest/src alignments to the memmove DAG
// node.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
+ AAMDNodes AAInfo;
+ I.getAAMetadata(AAInfo);
SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)));
+ MachinePointerInfo(I.getArgOperand(1)), AAInfo);
updateDAGForMaybeTailCall(MM);
return;
}
@@ -5859,7 +6015,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
}
case Intrinsic::dbg_addr:
case Intrinsic::dbg_declare: {
+ // Assume dbg.addr and dbg.declare cannot currently use DIArgList, i.e.
+ // they are non-variadic.
const auto &DI = cast<DbgVariableIntrinsic>(I);
+ assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
DILocalVariable *Variable = DI.getVariable();
DIExpression *Expression = DI.getExpression();
dropDanglingDebugInfo(Variable, Expression);
@@ -5867,7 +6026,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
<< "\n");
// Check if address has undef value.
- const Value *Address = DI.getVariableLocation();
+ const Value *Address = DI.getVariableLocationOp(0);
if (!Address || isa<UndefValue>(Address) ||
(Address->use_empty() && !isa<Argument>(Address))) {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
@@ -5898,8 +6057,9 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
if (FI != std::numeric_limits<int>::max()) {
if (Intrinsic == Intrinsic::dbg_addr) {
SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
- Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
+ Variable, Expression, FI, getRoot().getNode(), /*IsIndirect*/ true,
+ dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, isParameter);
} else {
LLVM_DEBUG(dbgs() << "Skipping " << DI
<< " (variable info stashed in MF side table)\n");
@@ -5931,7 +6091,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
true, dl, SDNodeOrder);
}
- DAG.AddDbgValue(SDV, N.getNode(), isParameter);
+ DAG.AddDbgValue(SDV, isParameter);
} else {
// If Address is an argument then try to emit its dbg value using
// virtual register info from the FuncInfo.ValueMap.
@@ -5960,20 +6120,17 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
DILocalVariable *Variable = DI.getVariable();
DIExpression *Expression = DI.getExpression();
dropDanglingDebugInfo(Variable, Expression);
- const Value *V = DI.getValue();
- if (!V)
+ SmallVector<Value *, 4> Values(DI.getValues());
+ if (Values.empty())
return;
- if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
- SDNodeOrder))
+ if (std::count(Values.begin(), Values.end(), nullptr))
return;
- // TODO: Dangling debug info will eventually either be resolved or produce
- // an Undef DBG_VALUE. However in the resolution case, a gap may appear
- // between the original dbg.value location and its resolved DBG_VALUE, which
- // we should ideally fill with an extra Undef DBG_VALUE.
-
- DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
+ bool IsVariadic = DI.hasArgList();
+ if (!handleDebugValue(Values, Variable, Expression, dl, DI.getDebugLoc(),
+ SDNodeOrder, IsVariadic))
+ addDanglingDebugInfo(&DI, dl, SDNodeOrder);
return;
}
@@ -6165,6 +6322,12 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1)), Flags));
return;
+ case Intrinsic::arithmetic_fence: {
+ setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)), Flags));
+ return;
+ }
case Intrinsic::fma:
setValue(&I, DAG.getNode(
ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
@@ -6215,19 +6378,25 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
getValue(I.getArgOperand(0)))));
return;
case Intrinsic::fptosi_sat: {
- EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
- SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
- setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, Type,
- getValue(I.getArgOperand(0)), SatW));
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
+ getValue(I.getArgOperand(0)),
+ DAG.getValueType(VT.getScalarType())));
return;
}
case Intrinsic::fptoui_sat: {
- EVT Type = TLI.getValueType(DAG.getDataLayout(), I.getType());
- SDValue SatW = DAG.getConstant(Type.getScalarSizeInBits(), sdl, MVT::i32);
- setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, Type,
- getValue(I.getArgOperand(0)), SatW));
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
+ getValue(I.getArgOperand(0)),
+ DAG.getValueType(VT.getScalarType())));
return;
}
+ case Intrinsic::set_rounding:
+ Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
+ {getRoot(), getValue(I.getArgOperand(0))});
+ setValue(&I, Res);
+ DAG.setRoot(Res.getValue(0));
+ return;
case Intrinsic::pcmarker: {
SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
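A standalone sketch of the saturating conversion that llvm.fptosi.sat lowers to via ISD::FP_TO_SINT_SAT above: out-of-range inputs clamp to the integer limits and NaN maps to zero (the double -> int32_t pairing is only an example):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

int32_t fptosi_sat_i32(double X) {
  if (std::isnan(X))
    return 0; // NaN saturates to 0 per the intrinsic's semantics
  if (X <= static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  if (X >= static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(X); // in range: ordinary truncating conversion
}

int main() {
  std::printf("%d %d %d\n", fptosi_sat_i32(1e20), fptosi_sat_i32(-1e20),
              fptosi_sat_i32(3.9)); // 2147483647 -2147483648 3
}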
@@ -6642,9 +6811,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SmallVector<const Value *, 4> Allocas;
getUnderlyingObjects(ObjectPtr, Allocas);
- for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
- E = Allocas.end(); Object != E; ++Object) {
- const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
+ for (const Value *Alloca : Allocas) {
+ const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
// Could not find an Alloca.
if (!LifetimeObject)
@@ -6688,6 +6856,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
lowerCallToExternalSymbol(I, FunctionName);
return;
case Intrinsic::donothing:
+ case Intrinsic::seh_try_begin:
+ case Intrinsic::seh_scope_begin:
+ case Intrinsic::seh_try_end:
+ case Intrinsic::seh_scope_end:
// ignore
return;
case Intrinsic::experimental_stackmap:
@@ -6849,7 +7021,9 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
case Intrinsic::experimental_deoptimize:
LowerDeoptimizeCall(&I);
return;
-
+ case Intrinsic::experimental_stepvector:
+ visitStepVector(I);
+ return;
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul:
case Intrinsic::vector_reduce_add:
@@ -6986,6 +7160,14 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDValue Vec = getValue(I.getOperand(0));
SDValue SubVec = getValue(I.getOperand(1));
SDValue Index = getValue(I.getOperand(2));
+
+ // The intrinsic's index type is i64, but the SDNode requires an index type
+ // suitable for the target. Convert the index as required.
+ MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+ if (Index.getValueType() != VectorIdxTy)
+ Index = DAG.getVectorIdxConstant(
+ cast<ConstantSDNode>(Index)->getZExtValue(), DL);
+
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ResultVT, Vec, SubVec,
Index));
@@ -6998,9 +7180,22 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDValue Index = getValue(I.getOperand(1));
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ // The intrinsic's index type is i64, but the SDNode requires an index type
+ // suitable for the target. Convert the index as required.
+ MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+ if (Index.getValueType() != VectorIdxTy)
+ Index = DAG.getVectorIdxConstant(
+ cast<ConstantSDNode>(Index)->getZExtValue(), DL);
+
setValue(&I, DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, Index));
return;
}
+ case Intrinsic::experimental_vector_reverse:
+ visitVectorReverse(I);
+ return;
+ case Intrinsic::experimental_vector_splice:
+ visitVectorSplice(I);
+ return;
}
}
@@ -7104,7 +7299,10 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
case ISD::STRICT_FSETCC:
case ISD::STRICT_FSETCCS: {
auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
- Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
+ ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
+ if (TM.Options.NoNaNsFPMath)
+ Condition = getFCmpCodeWithoutNaN(Condition);
+ Opers.push_back(DAG.getCondCode(Condition));
break;
}
}
@@ -7134,6 +7332,7 @@ static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
const VPIntrinsic &VPIntrin) {
+ SDLoc DL = getCurSDLoc();
unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
SmallVector<EVT, 4> ValueVTs;
@@ -7141,46 +7340,92 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
SDVTList VTs = DAG.getVTList(ValueVTs);
+ auto EVLParamPos =
+ VPIntrinsic::getVectorLengthParamPos(VPIntrin.getIntrinsicID());
+
+ MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
+ assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
+ "Unexpected target EVL type");
+
// Request operands.
SmallVector<SDValue, 7> OpValues;
- for (int i = 0; i < (int)VPIntrin.getNumArgOperands(); ++i)
- OpValues.push_back(getValue(VPIntrin.getArgOperand(i)));
+ for (unsigned I = 0; I < VPIntrin.getNumArgOperands(); ++I) {
+ auto Op = getValue(VPIntrin.getArgOperand(I));
+ if (I == EVLParamPos)
+ Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
+ OpValues.push_back(Op);
+ }
- SDLoc DL = getCurSDLoc();
SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues);
setValue(&VPIntrin, Result);
}
-std::pair<SDValue, SDValue>
-SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
- const BasicBlock *EHPadBB) {
+SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
+ const BasicBlock *EHPadBB,
+ MCSymbol *&BeginLabel) {
MachineFunction &MF = DAG.getMachineFunction();
MachineModuleInfo &MMI = MF.getMMI();
- MCSymbol *BeginLabel = nullptr;
- if (EHPadBB) {
- // Insert a label before the invoke call to mark the try range. This can be
- // used to detect deletion of the invoke via the MachineModuleInfo.
- BeginLabel = MMI.getContext().createTempSymbol();
+ // Insert a label before the invoke call to mark the try range. This can be
+ // used to detect deletion of the invoke via the MachineModuleInfo.
+ BeginLabel = MMI.getContext().createTempSymbol();
- // For SjLj, keep track of which landing pads go with which invokes
- // so as to maintain the ordering of pads in the LSDA.
- unsigned CallSiteIndex = MMI.getCurrentCallSite();
- if (CallSiteIndex) {
- MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
- LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
+ // For SjLj, keep track of which landing pads go with which invokes
+ // so as to maintain the ordering of pads in the LSDA.
+ unsigned CallSiteIndex = MMI.getCurrentCallSite();
+ if (CallSiteIndex) {
+ MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
+ LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
- // Now that the call site is handled, stop tracking it.
- MMI.setCurrentCallSite(0);
- }
+ // Now that the call site is handled, stop tracking it.
+ MMI.setCurrentCallSite(0);
+ }
+
+ return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
+}
+
+SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
+ const BasicBlock *EHPadBB,
+ MCSymbol *BeginLabel) {
+ assert(BeginLabel && "BeginLabel should've been set");
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineModuleInfo &MMI = MF.getMMI();
+
+ // Insert a label at the end of the invoke call to mark the try range. This
+ // can be used to detect deletion of the invoke via the MachineModuleInfo.
+ MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
+ Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
+
+ // Inform MachineModuleInfo of range.
+ auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
+ // There is a platform (e.g. wasm) that uses funclet style IR but does not
+ // actually use outlined funclets and their LSDA info style.
+ if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
+ assert(II && "II should've been set");
+ WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
+ EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
+ } else if (!isScopedEHPersonality(Pers)) {
+ assert(EHPadBB);
+ MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
+ }
+
+ return Chain;
+}
+
+std::pair<SDValue, SDValue>
+SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
+ const BasicBlock *EHPadBB) {
+ MCSymbol *BeginLabel = nullptr;
+
+ if (EHPadBB) {
// Both PendingLoads and PendingExports must be flushed here;
// this call might not return.
(void)getRoot();
- DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
-
+ DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
CLI.setChain(getRoot());
}
+
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
@@ -7202,22 +7447,8 @@ SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
}
if (EHPadBB) {
- // Insert a label at the end of the invoke call to mark the try range. This
- // can be used to detect deletion of the invoke via the MachineModuleInfo.
- MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
- DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
-
- // Inform MachineModuleInfo of range.
- auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
- // There is a platform (e.g. wasm) that uses funclet style IR but does not
- // actually use outlined funclets and their LSDA info style.
- if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
- assert(CLI.CB);
- WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
- EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
- } else if (!isScopedEHPersonality(Pers)) {
- MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
- }
+ DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
+ BeginLabel));
}
return Result;
@@ -7225,6 +7456,7 @@ SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
bool isTailCall,
+ bool isMustTailCall,
const BasicBlock *EHPadBB) {
auto &DL = DAG.getDataLayout();
FunctionType *FTy = CB.getFunctionType();
@@ -7241,7 +7473,7 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
// attribute.
auto *Caller = CB.getParent()->getParent();
if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
- "true")
+ "true" && !isMustTailCall)
isTailCall = false;
// We can't tail call inside a function with a swifterror argument. Lowering
@@ -7528,10 +7760,12 @@ bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
// because the return pointer needs to be adjusted by the size of
// the copied memory.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
+ AAMDNodes AAInfo;
+ I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
/*isTailCall=*/false,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)));
+ MachinePointerInfo(I.getArgOperand(1)), AAInfo);
assert(MC.getNode() != nullptr &&
"** memcpy should not be lowered as TailCall in mempcpy context **");
DAG.setRoot(MC);
@@ -7851,7 +8085,8 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// CFGuardTarget bundles are lowered in LowerCallTo.
assert(!I.hasOperandBundlesOtherThan(
{LLVMContext::OB_deopt, LLVMContext::OB_funclet,
- LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
+ LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
+ LLVMContext::OB_clang_arc_attachedcall}) &&
"Cannot lower calls with arbitrary operand bundles!");
SDValue Callee = getValue(I.getCalledOperand());
@@ -7862,7 +8097,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// Check if we can potentially perform a tail call. More detailed checking
// is done within LowerCallTo, after more information about the call is
// known.
- LowerCallTo(I, Callee, I.isTailCall());
+ LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
}
namespace {
@@ -8055,7 +8290,7 @@ static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
// remember that AX is actually i16 to get the right extension.
const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
- if (OpInfo.ConstraintVT != MVT::Other) {
+ if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
// If this is an FP operand in an integer register (or visa versa), or more
// generally if the operand value disagrees with the register class we plan
// to stick it in, fix the operand type.
@@ -8102,7 +8337,7 @@ static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
// Initialize NumRegs.
unsigned NumRegs = 1;
if (OpInfo.ConstraintVT != MVT::Other)
- NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
+ NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
// If this is a constraint for a specific physical register, like {r17},
// assign it now.
@@ -8186,7 +8421,8 @@ public:
} // end anonymous namespace
/// visitInlineAsm - Handle a call to an InlineAsm object.
-void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
+void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
+ const BasicBlock *EHPadBB) {
const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
/// ConstraintOperands - Information about all of the constraints.
@@ -8274,19 +8510,28 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
ExtraInfo.update(T);
}
-
// We won't need to flush pending loads if this asm doesn't touch
// memory and is nonvolatile.
SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
+ bool EmitEHLabels = isa<InvokeInst>(Call) && IA->canThrow();
+ if (EmitEHLabels) {
+ assert(EHPadBB && "InvokeInst must have an EHPadBB");
+ }
bool IsCallBr = isa<CallBrInst>(Call);
- if (IsCallBr) {
- // If this is a callbr we need to flush pending exports since inlineasm_br
- // is a terminator. We need to do this before nodes are glued to
- // the inlineasm_br node.
+
+ if (IsCallBr || EmitEHLabels) {
+ // If this is a callbr or invoke we need to flush pending exports since
+ // inlineasm_br and invoke are terminators.
+ // We need to do this before nodes are glued to the inlineasm_br node.
Chain = getControlRoot();
}
+ MCSymbol *BeginLabel = nullptr;
+ if (EmitEHLabels) {
+ Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
+ }
+
// Second pass over the constraints: compute which constraint option to use.
for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
// If this is an output operand with a matching input operand, look up the
@@ -8425,21 +8670,18 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
return;
}
- MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
SmallVector<unsigned, 4> Regs;
-
- if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
- unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
- MachineRegisterInfo &RegInfo =
- DAG.getMachineFunction().getRegInfo();
- for (unsigned i = 0; i != NumRegs; ++i)
- Regs.push_back(RegInfo.createVirtualRegister(RC));
- } else {
- emitInlineAsmError(Call,
- "inline asm error: This value type register "
- "class is not natively supported!");
- return;
- }
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ RegisterSDNode *R = dyn_cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
+ Register TiedReg = R->getReg();
+ MVT RegVT = R->getSimpleValueType(0);
+ const TargetRegisterClass *RC = TiedReg.isVirtual() ?
+ MRI.getRegClass(TiedReg) : TRI.getMinimalPhysRegClass(TiedReg);
+ unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
+ for (unsigned i = 0; i != NumRegs; ++i)
+ Regs.push_back(MRI.createVirtualRegister(RC));
RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
@@ -8677,8 +8919,13 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
if (!OutChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
+ if (EmitEHLabels) {
+ Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
+ }
+
// Only Update Root if inline assembly has a memory effect.
- if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
+ if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
+ EmitEHLabels)
DAG.setRoot(Chain);
}
@@ -9217,6 +9464,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Entry.IsByRef = false;
Entry.IsReturned = false;
Entry.IsSwiftSelf = false;
+ Entry.IsSwiftAsync = false;
Entry.IsSwiftError = false;
Entry.IsCFGuardTarget = false;
Entry.Alignment = Alignment;
@@ -9229,7 +9477,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
CLI.IsTailCall = false;
} else {
bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
- CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
+ CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
ISD::ArgFlagsTy Flags;
if (NeedsRegBlock) {
@@ -9287,9 +9535,9 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// FIXME: Split arguments if CLI.IsPostTypeLegalization
Type *FinalType = Args[i].Ty;
if (Args[i].IsByVal)
- FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
+ FinalType = Args[i].IndirectType;
bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
- FinalType, CLI.CallConv, CLI.IsVarArg);
+ FinalType, CLI.CallConv, CLI.IsVarArg, DL);
for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
++Value) {
EVT VT = ValueVTs[Value];
@@ -9302,6 +9550,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// for a type depending on the context. Give the target a chance to
// specify the alignment it wants.
const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
+ Flags.setOrigAlign(OriginalAlignment);
if (Args[i].Ty->isPointerTy()) {
Flags.setPointer();
@@ -9329,6 +9578,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setSRet();
if (Args[i].IsSwiftSelf)
Flags.setSwiftSelf();
+ if (Args[i].IsSwiftAsync)
+ Flags.setSwiftAsync();
if (Args[i].IsSwiftError)
Flags.setSwiftError();
if (Args[i].IsCFGuardTarget)
@@ -9355,27 +9606,26 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// in the various CC lowering callbacks.
Flags.setByVal();
}
+ Align MemAlign;
if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
- PointerType *Ty = cast<PointerType>(Args[i].Ty);
- Type *ElementTy = Ty->getElementType();
-
- unsigned FrameSize = DL.getTypeAllocSize(
- Args[i].ByValType ? Args[i].ByValType : ElementTy);
+ unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
Flags.setByValSize(FrameSize);
// info is not there but there are cases it cannot get right.
- Align FrameAlign;
if (auto MA = Args[i].Alignment)
- FrameAlign = *MA;
+ MemAlign = *MA;
else
- FrameAlign = Align(getByValTypeAlignment(ElementTy, DL));
- Flags.setByValAlign(FrameAlign);
+ MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
+ } else if (auto MA = Args[i].Alignment) {
+ MemAlign = *MA;
+ } else {
+ MemAlign = OriginalAlignment;
}
+ Flags.setMemAlign(MemAlign);
if (Args[i].IsNest)
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
- Flags.setOrigAlign(OriginalAlignment);
MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
CLI.CallConv, VT);
@@ -9660,8 +9910,9 @@ findArgumentCopyElisionCandidates(const DataLayout &DL,
// We will look through cast uses, so ignore them completely.
if (I.isCast())
continue;
- // Ignore debug info intrinsics, they don't escape or store to allocas.
- if (isa<DbgInfoIntrinsic>(I))
+ // Ignore debug info and pseudo op intrinsics, they don't escape or store
+ // to allocas.
+ if (I.isDebugOrPseudoInst())
continue;
// This is an unknown instruction. Assume it escapes or writes to all
// static alloca operands.
@@ -9688,13 +9939,17 @@ findArgumentCopyElisionCandidates(const DataLayout &DL,
continue;
// Check if the stored value is an argument, and that this store fully
- // initializes the alloca. Don't elide copies from the same argument twice.
+ // initializes the alloca.
+ // If the argument type has padding bits we can't directly forward a pointer
+ // as the upper bits may contain garbage.
+ // Don't elide copies from the same argument twice.
const Value *Val = SI->getValueOperand()->stripPointerCasts();
const auto *Arg = dyn_cast<Argument>(Val);
if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
Arg->getType()->isEmptyTy() ||
DL.getTypeStoreSize(Arg->getType()) !=
DL.getTypeAllocSize(AI->getAllocatedType()) ||
+ !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
ArgCopyElisionCandidates.count(Arg)) {
*Info = StaticAllocaInfo::Clobbered;
continue;
@@ -9829,18 +10084,13 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
if (Arg.hasAttribute(Attribute::ByVal))
FinalType = Arg.getParamByValType();
bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
- FinalType, F.getCallingConv(), F.isVarArg());
+ FinalType, F.getCallingConv(), F.isVarArg(), DL);
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
- // Certain targets (such as MIPS), may have a different ABI alignment
- // for a type depending on the context. Give the target a chance to
- // specify the alignment it wants.
- const Align OriginalAlignment(
- TLI->getABIAlignmentForCallingConv(ArgTy, DL));
if (Arg.getType()->isPointerTy()) {
Flags.setPointer();
@@ -9868,6 +10118,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Flags.setSRet();
if (Arg.hasAttribute(Attribute::SwiftSelf))
Flags.setSwiftSelf();
+ if (Arg.hasAttribute(Attribute::SwiftAsync))
+ Flags.setSwiftAsync();
if (Arg.hasAttribute(Attribute::SwiftError))
Flags.setSwiftError();
if (Arg.hasAttribute(Attribute::ByVal))
@@ -9893,6 +10145,14 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
Flags.setByVal();
}
+ // Certain targets (such as MIPS), may have a different ABI alignment
+ // for a type depending on the context. Give the target a chance to
+ // specify the alignment it wants.
+ const Align OriginalAlignment(
+ TLI->getABIAlignmentForCallingConv(ArgTy, DL));
+ Flags.setOrigAlign(OriginalAlignment);
+
+ Align MemAlign;
Type *ArgMemTy = nullptr;
if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
Flags.isByRef()) {
@@ -9904,24 +10164,27 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// For in-memory arguments, size and alignment should be passed from FE.
// BE will guess if this info is not there but there are cases it cannot
// get right.
- MaybeAlign MemAlign = Arg.getParamAlign();
- if (!MemAlign)
+ if (auto ParamAlign = Arg.getParamStackAlign())
+ MemAlign = *ParamAlign;
+ else if ((ParamAlign = Arg.getParamAlign()))
+ MemAlign = *ParamAlign;
+ else
MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
-
- if (Flags.isByRef()) {
+ if (Flags.isByRef())
Flags.setByRefSize(MemSize);
- Flags.setByRefAlign(*MemAlign);
- } else {
+ else
Flags.setByValSize(MemSize);
- Flags.setByValAlign(*MemAlign);
- }
+ } else if (auto ParamAlign = Arg.getParamStackAlign()) {
+ MemAlign = *ParamAlign;
+ } else {
+ MemAlign = OriginalAlignment;
}
+ Flags.setMemAlign(MemAlign);
if (Arg.hasAttribute(Attribute::Nest))
Flags.setNest();
if (NeedsRegBlock)
Flags.setInConsecutiveRegs();
- Flags.setOrigAlign(OriginalAlignment);
if (ArgCopyElisionCandidates.count(&Arg))
Flags.setCopyElisionCandidate();
if (Arg.hasAttribute(Attribute::Returned))
@@ -10807,6 +11070,36 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
}
}
+void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ auto DL = getCurSDLoc();
+ EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+ setValue(&I, DAG.getStepVector(DL, ResultVT));
+}
+
+void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+
+ SDLoc DL = getCurSDLoc();
+ SDValue V = getValue(I.getOperand(0));
+ assert(VT == V.getValueType() && "Malformed vector.reverse!");
+
+ if (VT.isScalableVector()) {
+ setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
+ return;
+ }
+
+ // Use VECTOR_SHUFFLE for the fixed-length vector
+ // to maintain existing behavior.
+ SmallVector<int, 8> Mask;
+ unsigned NumElts = VT.getVectorMinNumElements();
+ for (unsigned i = 0; i != NumElts; ++i)
+ Mask.push_back(NumElts - 1 - i);
+
+ setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
+}
+
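A minimal sketch of the fixed-length mask built above: reversing an N-element vector via VECTOR_SHUFFLE just uses mask element N-1-i.

#include <cstdio>
#include <vector>

std::vector<int> reverseShuffleMask(unsigned NumElts) {
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(NumElts - 1 - i); // lane i reads source lane N-1-i
  return Mask;
}

int main() {
  for (int M : reverseShuffleMask(4)) // prints: 3 2 1 0
    std::printf("%d ", M);
  std::printf("\n");
}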
void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
@@ -10824,3 +11117,37 @@ void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(ValueVTs), Values));
}
+
+void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+
+ SDLoc DL = getCurSDLoc();
+ SDValue V1 = getValue(I.getOperand(0));
+ SDValue V2 = getValue(I.getOperand(1));
+ int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
+
+ // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
+ if (VT.isScalableVector()) {
+ MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
+ setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
+ DAG.getConstant(Imm, DL, IdxVT)));
+ return;
+ }
+
+ unsigned NumElts = VT.getVectorNumElements();
+
+ if ((-Imm > NumElts) || (Imm >= NumElts)) {
+ // Result is undefined if immediate is out-of-bounds.
+ setValue(&I, DAG.getUNDEF(VT));
+ return;
+ }
+
+ uint64_t Idx = (NumElts + Imm) % NumElts;
+
+ // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
+ SmallVector<int, 8> Mask;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Mask.push_back(Idx + i);
+ setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
+}
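A standalone sketch of the fixed-length splice lowering above: the result is a window of NumElts elements starting at Idx = (NumElts + Imm) % NumElts within the concatenation V1 ++ V2, expressed as a shuffle mask (the element count and immediate below are illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int> spliceShuffleMask(unsigned NumElts, int64_t Imm) {
  if ((-Imm > (int64_t)NumElts) || (Imm >= (int64_t)NumElts))
    return {}; // out-of-bounds immediate: result is undefined
  uint64_t Idx = (NumElts + Imm) % NumElts;
  std::vector<int> Mask;
  for (unsigned i = 0; i < NumElts; ++i)
    Mask.push_back(Idx + i); // indexes into the concatenation V1 ++ V2
  return Mask;
}

int main() {
  // <4 x i32> with Imm = -1: the last element of V1 followed by the first
  // three of V2 -> mask {3, 4, 5, 6}.
  for (int M : spliceShuffleMask(4, -1))
    std::printf("%d ", M);
  std::printf("\n");
}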