author     Dimitry Andric <dim@FreeBSD.org>  2019-10-23 17:51:42 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-10-23 17:51:42 +0000
commit     1d5ae1026e831016fc29fd927877c86af904481f (patch)
tree       2cdfd12620fcfa5d9e4a0389f85368e8e36f63f9 /lib/CodeGen/GlobalISel/IRTranslator.cpp
parent     e6d1592492a3a379186bfb02bd0f4eda0669c0d5 (diff)
Vendor import of stripped llvm trunk r375505, the last commit before the
upstream Subversion repository was made read-only, and the LLVM project
migrated to GitHub:

  https://llvm.org/svn/llvm-project/llvm/trunk@375505

Notes:
  svn path=/vendor/llvm/dist/; revision=353940
  svn path=/vendor/llvm/llvm-r375505/; revision=353941; tag=vendor/llvm/llvm-trunk-r375505
Diffstat (limited to 'lib/CodeGen/GlobalISel/IRTranslator.cpp')
 lib/CodeGen/GlobalISel/IRTranslator.cpp | 392
 1 file changed, 200 insertions(+), 192 deletions(-)
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 6e99bdbd8264..45cef4aca888 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -32,6 +32,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
@@ -334,7 +335,7 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
bool IRTranslator::translateCompare(const User &U,
MachineIRBuilder &MIRBuilder) {
- const CmpInst *CI = dyn_cast<CmpInst>(&U);
+ auto *CI = dyn_cast<CmpInst>(&U);
Register Op0 = getOrCreateVReg(*U.getOperand(0));
Register Op1 = getOrCreateVReg(*U.getOperand(1));
Register Res = getOrCreateVReg(U);
@@ -345,11 +346,12 @@ bool IRTranslator::translateCompare(const User &U,
MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
else if (Pred == CmpInst::FCMP_FALSE)
MIRBuilder.buildCopy(
- Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
+ Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
else if (Pred == CmpInst::FCMP_TRUE)
MIRBuilder.buildCopy(
- Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
+ Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
else {
+ assert(CI && "Instruction should be CmpInst");
MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
MachineInstr::copyFlagsFromInstruction(*CI));
}
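
A minimal stand-alone sketch (editorial, not part of the patch) of the folding this hunk preserves: FCMP_FALSE and FCMP_TRUE ignore their operands, so the compare collapses to an all-zeros or all-ones constant of the result type, which is why U.getType() suffices even when CI is null.

    #include <cstdint>
    #include <cassert>

    // Hypothetical helper: fold a trivially-false/true compare to its
    // constant result; an all-ones value has every result bit set.
    uint64_t foldTrivialFCmp(bool PredIsTrue) {
      return PredIsTrue ? ~uint64_t(0) : uint64_t(0);
    }

    int main() {
      assert(foldTrivialFCmp(false) == 0);           // FCMP_FALSE -> getNullValue
      assert(foldTrivialFCmp(true) == ~uint64_t(0)); // FCMP_TRUE -> getAllOnesValue
    }
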
@@ -588,8 +590,8 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
} else {
- assert(CB.PredInfo.Pred == CmpInst::ICMP_ULE &&
- "Can only handle ULE ranges");
+ assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
+ "Can only handle SLE ranges");
const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
@@ -598,7 +600,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
Cond =
- MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, CmpOpReg, CondRHS).getReg(0);
+ MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
} else {
const LLT &CmpTy = MRI->getType(CmpOpReg);
auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
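
A stand-alone sketch (illustrative only, not from the patch) of the range lowering this hunk feeds into: when Low is not the minimum value, the signed test Low <= Cond <= High is emitted as the subtraction above followed by a single unsigned compare, (Cond - Low) u<= (High - Low).

    #include <cstdint>
    #include <cassert>

    // Hypothetical helper mirroring the sub + unsigned-compare trick; the
    // subtraction is done in unsigned arithmetic so wraparound is well defined.
    bool inRange(int64_t Cond, int64_t Low, int64_t High) {
      return (uint64_t)Cond - (uint64_t)Low <= (uint64_t)High - (uint64_t)Low;
    }

    int main() {
      assert(inRange(5, 1, 9));    // inside the range
      assert(!inRange(0, 1, 9));   // below Low wraps to a huge unsigned value
      assert(inRange(-3, -5, -1)); // signed ranges work too
    }
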
@@ -728,7 +730,7 @@ bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
MHS = nullptr;
} else {
// Check I->Low <= Cond <= I->High.
- Pred = CmpInst::ICMP_ULE;
+ Pred = CmpInst::ICMP_SLE;
LHS = I->Low;
MHS = Cond;
RHS = I->High;
@@ -879,7 +881,8 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
return true;
}
-
+ const MDNode *Ranges =
+ Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
for (unsigned i = 0; i < Regs.size(); ++i) {
Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
@@ -888,7 +891,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
unsigned BaseAlign = getMemOpAlignment(LI);
auto MMO = MF->getMachineMemOperand(
Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
- MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
+ MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), Ranges,
LI.getSyncScopeID(), LI.getOrdering());
MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
}
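
A stand-alone sketch (illustrative) of the MinAlign computation used above when splitting an aggregate load: a piece at byte offset Off of a base aligned to A is aligned to the largest power of two dividing both values.

    #include <cstdint>
    #include <cassert>

    // Hypothetical reimplementation of the min-alignment rule: the lowest
    // set bit of (A | B) is the largest power of two dividing both.
    uint64_t minAlign(uint64_t A, uint64_t B) {
      uint64_t X = A | B;
      return X & (~X + 1);
    }

    int main() {
      assert(minAlign(8, 4) == 4);   // piece at offset 4 of an 8-aligned base
      assert(minAlign(16, 0) == 16); // offset 0 keeps the base alignment
    }
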
@@ -1075,36 +1078,29 @@ bool IRTranslator::translateGetElementPtr(const User &U,
}
if (Offset != 0) {
- Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
- MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
-
- BaseReg = NewBaseReg;
+ BaseReg =
+ MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
Offset = 0;
}
Register IdxReg = getOrCreateVReg(*Idx);
- if (MRI->getType(IdxReg) != OffsetTy) {
- Register NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
- MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
- IdxReg = NewIdxReg;
- }
+ if (MRI->getType(IdxReg) != OffsetTy)
+ IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
// N = N + Idx * ElementSize;
// Avoid doing it for ElementSize of 1.
Register GepOffsetReg;
if (ElementSize != 1) {
- GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
auto ElementSizeMIB = MIRBuilder.buildConstant(
getLLTForType(*OffsetIRTy, *DL), ElementSize);
- MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
+ GepOffsetReg =
+ MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
} else
GepOffsetReg = IdxReg;
- Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
- BaseReg = NewBaseReg;
+ BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
}
}
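
A stand-alone sketch (illustrative) of the pointer arithmetic this hunk restructures: each variable index contributes Idx * ElementSize bytes to the running offset, and the multiply is skipped when ElementSize is 1.

    #include <cstdint>
    #include <cassert>

    // Hypothetical scalar model of the G_GEP chain built above.
    int64_t gepAdvance(int64_t Base, int64_t Idx, int64_t ElementSize) {
      int64_t Off = (ElementSize == 1) ? Idx : Idx * ElementSize;
      return Base + Off;
    }

    int main() {
      assert(gepAdvance(100, 3, 4) == 112); // e.g. an i32 array: 3 * 4 bytes
      assert(gepAdvance(100, 5, 1) == 105); // i8 element: no multiply emitted
    }
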
@@ -1119,54 +1115,51 @@ bool IRTranslator::translateGetElementPtr(const User &U,
return true;
}
-bool IRTranslator::translateMemfunc(const CallInst &CI,
+bool IRTranslator::translateMemFunc(const CallInst &CI,
MachineIRBuilder &MIRBuilder,
- unsigned ID) {
+ Intrinsic::ID ID) {
// If the source is undef, then just emit a nop.
- if (isa<UndefValue>(CI.getArgOperand(1))) {
- switch (ID) {
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- case Intrinsic::memset:
- return true;
- default:
- break;
- }
- }
-
- LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
- Type *DstTy = CI.getArgOperand(0)->getType();
- if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
- SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
- return false;
+ if (isa<UndefValue>(CI.getArgOperand(1)))
+ return true;
- SmallVector<CallLowering::ArgInfo, 8> Args;
- for (int i = 0; i < 3; ++i) {
- const auto &Arg = CI.getArgOperand(i);
- Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
+ ArrayRef<Register> Res;
+ auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
+ for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
+ ICall.addUse(getOrCreateVReg(**AI));
+
+ unsigned DstAlign = 0, SrcAlign = 0;
+ unsigned IsVol =
+ cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
+ ->getZExtValue();
+
+ if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
+ DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
+ SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
+ } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
+ DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
+ SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
+ } else {
+ auto *MSI = cast<MemSetInst>(&CI);
+ DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
}
- const char *Callee;
- switch (ID) {
- case Intrinsic::memmove:
- case Intrinsic::memcpy: {
- Type *SrcTy = CI.getArgOperand(1)->getType();
- if(cast<PointerType>(SrcTy)->getAddressSpace() != 0)
- return false;
- Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
- break;
- }
- case Intrinsic::memset:
- Callee = "memset";
- break;
- default:
- return false;
- }
+ // We need to propagate the tail call flag from the IR inst as an argument.
+ // Otherwise, we have to pessimize and assume later that we cannot tail call
+ // any memory intrinsics.
+ ICall.addImm(CI.isTailCall() ? 1 : 0);
- return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
- MachineOperand::CreateES(Callee),
- CallLowering::ArgInfo({0}, CI.getType()), Args);
+ // Create mem operands to store the alignment and volatile info.
+ auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
+ ICall.addMemOperand(MF->getMachineMemOperand(
+ MachinePointerInfo(CI.getArgOperand(0)),
+ MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
+ if (ID != Intrinsic::memset)
+ ICall.addMemOperand(MF->getMachineMemOperand(
+ MachinePointerInfo(CI.getArgOperand(1)),
+ MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
+
+ return true;
}
void IRTranslator::getStackGuard(Register DstReg,
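
A stand-alone sketch (illustrative) of the argument walk in the new translateMemFunc above: every IR operand except the trailing isvolatile flag becomes a register use, which is why the loop stops at std::next(AI) != AE.

    #include <cassert>
    #include <iterator>
    #include <vector>

    // Hypothetical model: collect all call arguments except the last one,
    // which is the volatile flag and is encoded separately.
    std::vector<int> dropTrailingFlag(const std::vector<int> &Args) {
      std::vector<int> Uses;
      for (auto AI = Args.begin(), AE = Args.end(); std::next(AI) != AE; ++AI)
        Uses.push_back(*AI);
      return Uses;
    }

    int main() {
      std::vector<int> Args{/*dst*/ 10, /*src*/ 11, /*len*/ 12, /*isvolatile*/ 0};
      assert(dropTrailingFlag(Args).size() == 3); // the flag is not a vreg use
    }
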
@@ -1186,7 +1179,7 @@ void IRTranslator::getStackGuard(Register DstReg,
MachineMemOperand::MODereferenceable;
MachineMemOperand *MemRef =
MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
- DL->getPointerABIAlignment(0));
+ DL->getPointerABIAlignment(0).value());
MIB.setMemRefs({MemRef});
}
@@ -1208,6 +1201,8 @@ unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
break;
case Intrinsic::bswap:
return TargetOpcode::G_BSWAP;
+ case Intrinsic::bitreverse:
+ return TargetOpcode::G_BITREVERSE;
case Intrinsic::ceil:
return TargetOpcode::G_FCEIL;
case Intrinsic::cos:
@@ -1383,16 +1378,17 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
+ MIRBuilder.buildDirectDbgValue(0, DI.getVariable(), DI.getExpression());
} else if (const auto *CI = dyn_cast<Constant>(V)) {
MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
} else {
- Register Reg = getOrCreateVReg(*V);
- // FIXME: This does not handle register-indirect values at offset 0. The
- // direct/indirect thing shouldn't really be handled by something as
- // implicit as reg+noreg vs reg+imm in the first palce, but it seems
- // pretty baked in right now.
- MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
+ for (Register Reg : getOrCreateVRegs(*V)) {
+ // FIXME: This does not handle register-indirect values at offset 0. The
+ // direct/indirect thing shouldn't really be handled by something as
+ // implicit as reg+noreg vs reg+imm in the first place, but it seems
+ // pretty baked in right now.
+ MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
+ }
}
return true;
}
@@ -1433,7 +1429,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
- return translateMemfunc(CI, MIRBuilder, ID);
+ return translateMemFunc(CI, MIRBuilder, ID);
case Intrinsic::eh_typeid_for: {
GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
Register Reg = getOrCreateVReg(CI);
@@ -1441,18 +1437,12 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
MIRBuilder.buildConstant(Reg, TypeID);
return true;
}
- case Intrinsic::objectsize: {
- // If we don't know by now, we're never going to know.
- const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
+ case Intrinsic::objectsize:
+ llvm_unreachable("llvm.objectsize.* should have been lowered already");
- MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
- return true;
- }
case Intrinsic::is_constant:
- // If this wasn't constant-folded away by now, then it's not a
- // constant.
- MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
- return true;
+ llvm_unreachable("llvm.is.constant.* should have been lowered already");
+
case Intrinsic::stackguard:
getStackGuard(getOrCreateVReg(CI), MIRBuilder);
return true;
@@ -1551,6 +1541,46 @@ bool IRTranslator::translateInlineAsm(const CallInst &CI,
return true;
}
+bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
+ MachineIRBuilder &MIRBuilder) {
+ const Instruction &I = *CS.getInstruction();
+ ArrayRef<Register> Res = getOrCreateVRegs(I);
+
+ SmallVector<ArrayRef<Register>, 8> Args;
+ Register SwiftInVReg = 0;
+ Register SwiftErrorVReg = 0;
+ for (auto &Arg : CS.args()) {
+ if (CLI->supportSwiftError() && isSwiftError(Arg)) {
+ assert(SwiftInVReg == 0 && "Expected only one swift error argument");
+ LLT Ty = getLLTForType(*Arg->getType(), *DL);
+ SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
+ MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
+ &I, &MIRBuilder.getMBB(), Arg));
+ Args.emplace_back(makeArrayRef(SwiftInVReg));
+ SwiftErrorVReg =
+ SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
+ continue;
+ }
+ Args.push_back(getOrCreateVRegs(*Arg));
+ }
+
+ // We don't set HasCalls on MFI here yet because call lowering may decide to
+ // optimize into tail calls. Instead, we defer that to selection where a final
+ // scan is done to check if any instructions are calls.
+ bool Success =
+ CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
+ [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
+
+ // Check if we just inserted a tail call.
+ if (Success) {
+ assert(!HasTailCall && "Can't tail call return twice from block?");
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
+ }
+
+ return Success;
+}
+
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
const CallInst &CI = cast<CallInst>(U);
auto TII = MF->getTarget().getIntrinsicInfo();
@@ -1570,34 +1600,8 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
}
- if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
- ArrayRef<Register> Res = getOrCreateVRegs(CI);
-
- SmallVector<ArrayRef<Register>, 8> Args;
- Register SwiftInVReg = 0;
- Register SwiftErrorVReg = 0;
- for (auto &Arg: CI.arg_operands()) {
- if (CLI->supportSwiftError() && isSwiftError(Arg)) {
- assert(SwiftInVReg == 0 && "Expected only one swift error argument");
- LLT Ty = getLLTForType(*Arg->getType(), *DL);
- SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
- MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
- &CI, &MIRBuilder.getMBB(), Arg));
- Args.emplace_back(makeArrayRef(SwiftInVReg));
- SwiftErrorVReg =
- SwiftError.getOrCreateVRegDefAt(&CI, &MIRBuilder.getMBB(), Arg);
- continue;
- }
- Args.push_back(getOrCreateVRegs(*Arg));
- }
-
- MF->getFrameInfo().setHasCalls(true);
- bool Success =
- CLI->lowerCall(MIRBuilder, &CI, Res, Args, SwiftErrorVReg,
- [&]() { return getOrCreateVReg(*CI.getCalledValue()); });
-
- return Success;
- }
+ if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
+ return translateCallSite(&CI, MIRBuilder);
assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
@@ -1615,14 +1619,29 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (isa<FPMathOperator>(CI))
MIB->copyIRFlags(CI);
- for (auto &Arg : CI.arg_operands()) {
+ for (auto &Arg : enumerate(CI.arg_operands())) {
// Some intrinsics take metadata parameters. Reject them.
- if (isa<MetadataAsValue>(Arg))
- return false;
- ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg);
- if (VRegs.size() > 1)
+ if (isa<MetadataAsValue>(Arg.value()))
return false;
- MIB.addUse(VRegs[0]);
+
+ // If this is required to be an immediate, don't materialize it in a
+ // register.
+ if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
+ // imm arguments are more convenient than cimm (and realistically
+ // probably sufficient), so use them.
+ assert(CI->getBitWidth() <= 64 &&
+ "large intrinsic immediates not handled");
+ MIB.addImm(CI->getSExtValue());
+ } else {
+ MIB.addFPImm(cast<ConstantFP>(Arg.value()));
+ }
+ } else {
+ ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
+ if (VRegs.size() > 1)
+ return false;
+ MIB.addUse(VRegs[0]);
+ }
}
// Add a MachineMemOperand if it is a target mem intrinsic.
@@ -1630,13 +1649,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
TargetLowering::IntrinsicInfo Info;
// TODO: Add a GlobalISel version of getTgtMemIntrinsic.
if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
- unsigned Align = Info.align;
- if (Align == 0)
- Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
+ MaybeAlign Align = Info.align;
+ if (!Align)
+ Align = MaybeAlign(
+ DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));
uint64_t Size = Info.memVT.getStoreSize();
- MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
- Info.flags, Size, Align));
+ MIB.addMemOperand(MF->getMachineMemOperand(
+ MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
}
return true;
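
A stand-alone sketch (illustrative; std::optional stands in for LLVM's MaybeAlign) of the fallback above: an unset alignment defers to the ABI type alignment before the byte value is handed to the memory operand.

    #include <cassert>
    #include <optional>

    // Hypothetical model of the MaybeAlign fallback.
    unsigned alignmentOrABI(std::optional<unsigned> Align, unsigned ABITypeAlign) {
      if (!Align)
        Align = ABITypeAlign;
      return *Align; // corresponds to Align->value() in the hunk
    }

    int main() {
      assert(alignmentOrABI(std::nullopt, 8) == 8); // intrinsic gave no alignment
      assert(alignmentOrABI(16, 8) == 16);          // explicit alignment wins
    }
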
@@ -1672,30 +1692,7 @@ bool IRTranslator::translateInvoke(const User &U,
MCSymbol *BeginSymbol = Context.createTempSymbol();
MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
- ArrayRef<Register> Res;
- if (!I.getType()->isVoidTy())
- Res = getOrCreateVRegs(I);
- SmallVector<ArrayRef<Register>, 8> Args;
- Register SwiftErrorVReg = 0;
- Register SwiftInVReg = 0;
- for (auto &Arg : I.arg_operands()) {
- if (CLI->supportSwiftError() && isSwiftError(Arg)) {
- assert(SwiftInVReg == 0 && "Expected only one swift error argument");
- LLT Ty = getLLTForType(*Arg->getType(), *DL);
- SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
- MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
- &I, &MIRBuilder.getMBB(), Arg));
- Args.push_back(makeArrayRef(SwiftInVReg));
- SwiftErrorVReg =
- SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
- continue;
- }
-
- Args.push_back(getOrCreateVRegs(*Arg));
- }
-
- if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, SwiftErrorVReg,
- [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
+ if (!translateCallSite(&I, MIRBuilder))
return false;
MCSymbol *EndSymbol = Context.createTempSymbol();
@@ -1811,36 +1808,25 @@ bool IRTranslator::translateAlloca(const User &U,
Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
Register TySize =
- getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
+ getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
MIRBuilder.buildMul(AllocSize, NumElts, TySize);
- LLT PtrTy = getLLTForType(*AI.getType(), *DL);
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
-
- Register SPTmp = MRI->createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildCopy(SPTmp, SPReg);
-
- Register AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
-
- // Handle alignment. We have to realign if the allocation granule was smaller
- // than stack alignment, or the specific alloca requires more than stack
- // alignment.
unsigned StackAlign =
MF->getSubtarget().getFrameLowering()->getStackAlignment();
- Align = std::max(Align, StackAlign);
- if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
- // Round the size of the allocation up to the stack alignment size
- // by add SA-1 to the size. This doesn't overflow because we're computing
- // an address inside an alloca.
- Register AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
- MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
- AllocTmp = AlignedAlloc;
- }
+ if (Align <= StackAlign)
+ Align = 0;
+
+ // Round the size of the allocation up to the stack alignment size
+ // by add SA-1 to the size. This doesn't overflow because we're computing
+ // an address inside an alloca.
+ auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
+ auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
+ MachineInstr::NoUWrap);
+ auto AlignCst =
+ MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
+ auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
- MIRBuilder.buildCopy(SPReg, AllocTmp);
- MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
+ MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);
MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
assert(MF->getFrameInfo().hasVarSizedObjects());
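
A stand-alone sketch (illustrative) of the size rounding the new alloca lowering performs: the add-with-NoUWrap plus mask above rounds the allocation size up to the stack alignment.

    #include <cstdint>
    #include <cassert>

    // Hypothetical scalar model of buildAdd(SA-1) followed by buildAnd(~(SA-1)).
    uint64_t roundUpToStackAlign(uint64_t Size, uint64_t StackAlign) {
      assert(StackAlign != 0 && (StackAlign & (StackAlign - 1)) == 0 &&
             "stack alignment must be a power of two");
      return (Size + StackAlign - 1) & ~(StackAlign - 1);
    }

    int main() {
      assert(roundUpToStackAlign(13, 16) == 16);
      assert(roundUpToStackAlign(32, 16) == 32); // already aligned: unchanged
    }
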
@@ -1926,7 +1912,7 @@ bool IRTranslator::translateShuffleVector(const User &U,
.addDef(getOrCreateVReg(U))
.addUse(getOrCreateVReg(*U.getOperand(0)))
.addUse(getOrCreateVReg(*U.getOperand(1)))
- .addUse(getOrCreateVReg(*U.getOperand(2)));
+ .addShuffleMask(cast<Constant>(U.getOperand(2)));
return true;
}
@@ -1991,7 +1977,6 @@ bool IRTranslator::translateAtomicRMW(const User &U,
unsigned Opcode = 0;
switch (I.getOperation()) {
default:
- llvm_unreachable("Unknown atomicrmw op");
return false;
case AtomicRMWInst::Xchg:
Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
@@ -2026,6 +2011,12 @@ bool IRTranslator::translateAtomicRMW(const User &U,
case AtomicRMWInst::UMin:
Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
break;
+ case AtomicRMWInst::FAdd:
+ Opcode = TargetOpcode::G_ATOMICRMW_FADD;
+ break;
+ case AtomicRMWInst::FSub:
+ Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
+ break;
}
MIRBuilder.buildAtomicRMW(
@@ -2197,6 +2188,20 @@ void IRTranslator::finalizeFunction() {
FuncInfo.clear();
}
+/// Returns true if a BasicBlock \p BB within a variadic function contains a
+/// variadic musttail call.
+static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
+ if (!IsVarArg)
+ return false;
+
+ // Walk the block backwards, because tail calls usually only appear at the end
+ // of a block.
+ return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
+ const auto *CI = dyn_cast<CallInst>(&I);
+ return CI && CI->isMustTailCall();
+ });
+}
+
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
MF = &CurMF;
const Function &F = MF->getFunction();
@@ -2212,26 +2217,26 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
: TPC->isGISelCSEEnabled();
if (EnableCSE) {
- EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
+ EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
CSEInfo = &Wrapper.get(TPC->getCSEConfig());
EntryBuilder->setCSEInfo(CSEInfo);
- CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
+ CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
CurBuilder->setCSEInfo(CSEInfo);
} else {
- EntryBuilder = make_unique<MachineIRBuilder>();
- CurBuilder = make_unique<MachineIRBuilder>();
+ EntryBuilder = std::make_unique<MachineIRBuilder>();
+ CurBuilder = std::make_unique<MachineIRBuilder>();
}
CLI = MF->getSubtarget().getCallLowering();
CurBuilder->setMF(*MF);
EntryBuilder->setMF(*MF);
MRI = &MF->getRegInfo();
DL = &F.getParent()->getDataLayout();
- ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
+ ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
FuncInfo.MF = MF;
FuncInfo.BPI = nullptr;
const auto &TLI = *MF->getSubtarget().getTargetLowering();
const TargetMachine &TM = MF->getTarget();
- SL = make_unique<GISelSwitchLowering>(this, FuncInfo);
+ SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
SL->init(TLI, TM, *DL);
EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
@@ -2258,6 +2263,9 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
SwiftError.setFunction(CurMF);
SwiftError.createEntriesInEntryBlock(DbgLoc);
+ bool IsVarArg = F.isVarArg();
+ bool HasMustTailInVarArgFn = false;
+
// Create all blocks, in IR order, to preserve the layout.
for (const BasicBlock &BB: F) {
auto *&MBB = BBToMBB[&BB];
@@ -2267,8 +2275,13 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
if (BB.hasAddressTaken())
MBB->setHasAddressTaken();
+
+ if (!HasMustTailInVarArgFn)
+ HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
}
+ MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
+
// Make our arguments/constants entry block fallthrough to the IR entry block.
EntryBB->addSuccessor(&getMBB(F.front()));
@@ -2286,18 +2299,6 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
}
}
- // We don't currently support translating swifterror or swiftself functions.
- for (auto &Arg : F.args()) {
- if (Arg.hasSwiftSelfAttr()) {
- OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
- F.getSubprogram(), &F.getEntryBlock());
- R << "unable to lower arguments due to swiftself: "
- << ore::NV("Prototype", F.getType());
- reportTranslationError(*MF, *TPC, *ORE, R);
- return false;
- }
- }
-
if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
F.getSubprogram(), &F.getEntryBlock());
@@ -2322,8 +2323,15 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
// Set the insertion point of all the following translations to
// the end of this basic block.
CurBuilder->setMBB(MBB);
-
+ HasTailCall = false;
for (const Instruction &Inst : *BB) {
+ // If we translated a tail call in the last step, then we know
+ // everything after the call is either a return, or something that is
+ // handled by the call itself. (E.g. a lifetime marker or assume
+ // intrinsic.) In this case, we should stop translating the block and
+ // move on.
+ if (HasTailCall)
+ break;
#ifndef NDEBUG
Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG