author     Dimitry Andric <dim@FreeBSD.org>  2021-02-16 20:13:02 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2021-02-16 20:13:02 +0000
commit  b60736ec1405bb0a8dd40989f67ef4c93da068ab (patch)
tree    5c43fbb7c9fc45f0f87e0e6795a86267dbd12f9d /llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
parent  cfca06d7963fa0909f90483b42a6d7d194d01e08 (diff)
Vendor import of llvm-project main 8e464dd76bef, the last commit before
the upstream release/12.x branch was created.
(ref: vendor/llvm-project/llvmorg-12-init-17869-g8e464dd76bef)
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp | 146
1 file changed, 75 insertions(+), 71 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 9c6833a7dab6..2628070f219c 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -9,19 +9,10 @@
//
#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "SIInstrInfo.h"
+#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "si-shrink-instructions"
@@ -78,7 +69,7 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
MachineOperand &Src0 = MI.getOperand(Src0Idx);
if (Src0.isReg()) {
Register Reg = Src0.getReg();
- if (Register::isVirtualRegister(Reg) && MRI.hasOneUse(Reg)) {
+ if (Reg.isVirtual() && MRI.hasOneUse(Reg)) {
MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
if (Def && Def->isMoveImmediate()) {
MachineOperand &MovSrc = Def->getOperand(1);
@@ -86,13 +77,9 @@ static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
isUInt<32>(MovSrc.getImm()))) {
- // It's possible to have only one component of a super-reg defined by
- // a single mov, so we need to clear any subregister flag.
- Src0.setSubReg(0);
Src0.ChangeToImmediate(MovSrc.getImm());
ConstantFolded = true;
} else if (MovSrc.isFI()) {
- Src0.setSubReg(0);
Src0.ChangeToFrameIndex(MovSrc.getIndex());
ConstantFolded = true;
} else if (MovSrc.isGlobal()) {
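
[Editor's note on this hunk: Register now carries the virtual/physical predicates as instance methods, and MachineOperand::ChangeToImmediate clears any subregister index itself, which is why the explicit setSubReg(0) calls can go. A minimal sketch of the new check; isFoldableVReg is an illustrative name, not part of the patch.]

// Sketch only: Register exposes isVirtual()/isPhysical() directly,
// replacing the older static Register::isVirtualRegister(unsigned).
#include "llvm/CodeGen/Register.h"

static bool isFoldableVReg(llvm::Register Reg) {
  // Valid (non-null) and virtual, i.e. not yet assigned by the allocator.
  return Reg.isValid() && Reg.isVirtual();
}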
@@ -276,8 +263,8 @@ void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) {
// enabled
int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
- unsigned TFEVal = MI.getOperand(TFEIdx).getImm();
- unsigned LWEVal = MI.getOperand(LWEIdx).getImm();
+ unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
+ unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
int ToUntie = -1;
if (TFEVal || LWEVal) {
// TFE/LWE is enabled so we need to deal with an implicit tied operand
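
[Editor's note on this hunk: the guards matter because AMDGPU::getNamedOperandIdx returns -1 when an opcode has no such operand, and some MIMG encodings lack tfe/lwe entirely, so the old code indexed the operand list with -1. A hedged helper capturing the pattern; getImmOrZero is an illustrative name, and the include path is an assumption.]

// Read an optional named immediate operand, treating a missing operand
// (index -1) as zero. Sketch; assumes the AMDGPU target's Utils headers.
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/MachineInstr.h"

static unsigned getImmOrZero(const llvm::MachineInstr &MI, unsigned OpName) {
  int Idx = llvm::AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  return Idx == -1 ? 0 : MI.getOperand(Idx).getImm();
}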
@@ -367,19 +354,23 @@ static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
}
if (NewImm != 0) {
- if (Register::isVirtualRegister(Dest->getReg()) && SrcReg->isReg()) {
+ if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
MRI.setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
return true;
}
if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
+ const bool IsUndef = SrcReg->isUndef();
+ const bool IsKill = SrcReg->isKill();
MI.setDesc(TII->get(Opc));
if (Opc == AMDGPU::S_BITSET0_B32 ||
Opc == AMDGPU::S_BITSET1_B32) {
Src0->ChangeToImmediate(NewImm);
// Remove the immediate and add the tied input.
- MI.getOperand(2).ChangeToRegister(Dest->getReg(), false);
+ MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
+ /*isImp*/ false, IsKill,
+ /*isDead*/ false, IsUndef);
MI.tieOperands(0, 2);
} else {
SrcImm->setImm(NewImm);
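
[Editor's note on this hunk: the kill/undef flags are captured up front because the short two-argument ChangeToRegister overload resets them, and the tied S_BITSET input would otherwise lose liveness information. A minimal sketch of the flag-preserving rewrite; retargetUse is an illustrative name.]

// Re-point an operand at Reg without dropping liveness flags; the terse
// ChangeToRegister(Reg, false) form would reset isKill/isUndef.
#include "llvm/CodeGen/MachineOperand.h"

static void retargetUse(llvm::MachineOperand &MO, llvm::Register Reg,
                        bool WasKill, bool WasUndef) {
  MO.ChangeToRegister(Reg, /*isDef=*/false, /*isImp=*/false,
                      /*isKill=*/WasKill, /*isDead=*/false,
                      /*isUndef=*/WasUndef);
}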
@@ -393,17 +384,16 @@ static bool shrinkScalarLogicOp(const GCNSubtarget &ST,
// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
static bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
- unsigned Reg, unsigned SubReg,
+ Register Reg, unsigned SubReg,
const SIRegisterInfo &TRI) {
for (const MachineOperand &MO : R) {
if (!MO.isReg())
continue;
- if (Register::isPhysicalRegister(Reg) &&
- Register::isPhysicalRegister(MO.getReg())) {
+ if (Reg.isPhysical() && MO.getReg().isPhysical()) {
if (TRI.regsOverlap(Reg, MO.getReg()))
return true;
- } else if (MO.getReg() == Reg && Register::isVirtualRegister(Reg)) {
+ } else if (MO.getReg() == Reg && Reg.isVirtual()) {
LaneBitmask Overlap = TRI.getSubRegIndexLaneMask(SubReg) &
TRI.getSubRegIndexLaneMask(MO.getSubReg());
if (Overlap.any())
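
[Editor's note on this hunk: taking Reg as a Register lets the code use the instance predicates; the virtual-register case compares lane masks, since two subregister indices of one virtual register alias exactly when their lane masks intersect. A hedged distillation; subRegsOverlap is an illustrative name.]

// Two subregister indices of the same virtual register overlap iff
// their lane masks share a bit. Sketch only.
#include "llvm/CodeGen/TargetRegisterInfo.h"

static bool subRegsOverlap(const llvm::TargetRegisterInfo &TRI,
                           unsigned SubA, unsigned SubB) {
  return (TRI.getSubRegIndexLaneMask(SubA) &
          TRI.getSubRegIndexLaneMask(SubB)).any();
}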
@@ -426,10 +416,10 @@ static bool instModifiesReg(const MachineInstr *MI,
}
static TargetInstrInfo::RegSubRegPair
-getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
+getSubRegForIndex(Register Reg, unsigned Sub, unsigned I,
const SIRegisterInfo &TRI, const MachineRegisterInfo &MRI) {
if (TRI.getRegSizeInBits(Reg, MRI) != 32) {
- if (Register::isPhysicalRegister(Reg)) {
+ if (Reg.isPhysical()) {
Reg = TRI.getSubReg(Reg, TRI.getSubRegFromChannel(I));
} else {
Sub = TRI.getSubRegFromChannel(I + TRI.getChannelFromSubReg(Sub));
@@ -438,6 +428,22 @@ getSubRegForIndex(unsigned Reg, unsigned Sub, unsigned I,
return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}
+static void dropInstructionKeepingImpDefs(MachineInstr &MI,
+ const SIInstrInfo *TII) {
+ for (unsigned i = MI.getDesc().getNumOperands() +
+ MI.getDesc().getNumImplicitUses() +
+ MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
+ i != e; ++i) {
+ const MachineOperand &Op = MI.getOperand(i);
+ if (!Op.isDef())
+ continue;
+ BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
+ TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
+ }
+
+ MI.eraseFromParent();
+}
+
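[Editor's note on the helper above: after register allocation a dead mov can still carry implicit defs (typically of a covering super-register) that later liveness queries depend on; a plain eraseFromParent would lose them, so each kept def is re-materialized as an IMPLICIT_DEF first. A hedged usage sketch in comments:]

// Illustrative before/after; prefer the helper over a bare erase when the
// dead instruction may carry allocator-added implicit defs.
//   MovY->eraseFromParent();                   // loses implicit defs
//   dropInstructionKeepingImpDefs(*MovY, TII); // emits Reg = IMPLICIT_DEF
//                                              // per kept def, then erases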
// Match:
// mov t, x
// mov x, y
@@ -477,18 +483,25 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
if (!TRI.isVGPR(MRI, X))
return nullptr;
+ if (MovT.hasRegisterImplicitUseOperand(AMDGPU::M0))
+ return nullptr;
+
const unsigned SearchLimit = 16;
unsigned Count = 0;
+ bool KilledT = false;
for (auto Iter = std::next(MovT.getIterator()),
E = MovT.getParent()->instr_end();
- Iter != E && Count < SearchLimit; ++Iter, ++Count) {
+ Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {
MachineInstr *MovY = &*Iter;
+ KilledT = MovY->killsRegister(T, &TRI);
+
if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
MovY->getOpcode() != AMDGPU::COPY) ||
!MovY->getOperand(1).isReg() ||
MovY->getOperand(1).getReg() != T ||
- MovY->getOperand(1).getSubReg() != Tsub)
+ MovY->getOperand(1).getSubReg() != Tsub ||
+ MovY->hasRegisterImplicitUseOperand(AMDGPU::M0))
continue;
Register Y = MovY->getOperand(0).getReg();
@@ -522,32 +535,53 @@ static MachineInstr* matchSwap(MachineInstr &MovT, MachineRegisterInfo &MRI,
MovX = nullptr;
break;
}
+ // Implicit use of M0 is an indirect move.
+ if (I->hasRegisterImplicitUseOperand(AMDGPU::M0))
+ continue;
+
+ if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
+ continue;
+
MovX = &*I;
}
if (!MovX)
continue;
- LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << MovY);
+ LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);
for (unsigned I = 0; I < Size; ++I) {
TargetInstrInfo::RegSubRegPair X1, Y1;
X1 = getSubRegForIndex(X, Xsub, I, TRI, MRI);
Y1 = getSubRegForIndex(Y, Ysub, I, TRI, MRI);
- BuildMI(*MovT.getParent(), MovX->getIterator(), MovT.getDebugLoc(),
- TII->get(AMDGPU::V_SWAP_B32))
+ MachineBasicBlock &MBB = *MovT.getParent();
+ auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
+ TII->get(AMDGPU::V_SWAP_B32))
.addDef(X1.Reg, 0, X1.SubReg)
.addDef(Y1.Reg, 0, Y1.SubReg)
.addReg(Y1.Reg, 0, Y1.SubReg)
.addReg(X1.Reg, 0, X1.SubReg).getInstr();
+ if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
+ // Drop implicit EXEC.
+ MIB->RemoveOperand(MIB->getNumExplicitOperands());
+ MIB->copyImplicitOps(*MBB.getParent(), *MovX);
+ }
}
MovX->eraseFromParent();
- MovY->eraseFromParent();
+ dropInstructionKeepingImpDefs(*MovY, TII);
MachineInstr *Next = &*std::next(MovT.getIterator());
- if (MRI.use_nodbg_empty(T))
- MovT.eraseFromParent();
- else
+
+ if (MRI.use_nodbg_empty(T)) {
+ dropInstructionKeepingImpDefs(MovT, TII);
+ } else {
Xop.setIsKill(false);
+ for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I ) {
+ unsigned OpNo = MovT.getNumExplicitOperands() + I;
+ const MachineOperand &Op = MovT.getOperand(OpNo);
+ if (Op.isKill() && TRI.regsOverlap(X, Op.getReg()))
+ MovT.RemoveOperand(OpNo);
+ }
+ }
return Next;
}
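
[Editor's note on the matchSwap hunks above: seen end to end, the matched pattern is a rotation through a scratch register that is really a swap, rewritten as one V_SWAP_B32 per 32-bit channel. A plain C++ rendering of the semantics being preserved; swapViaScratch is an illustrative name.]

// The matched triple "t = x; x = y; y = t" swaps x and y through t;
// V_SWAP_B32 performs this per 32-bit channel in a single instruction.
#include <cstdint>

static void swapViaScratch(uint32_t &X, uint32_t &Y) {
  uint32_t T = X; // mov t, x   (MovT)
  X = Y;          // mov x, y   (MovX)
  Y = T;          // mov y, t   (MovY)
}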
@@ -585,8 +619,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// optimizations happen because this will confuse them.
// XXX - not exactly a check for post-regalloc run.
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() &&
- Register::isPhysicalRegister(MI.getOperand(0).getReg())) {
+ if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
int32_t ReverseImm;
if (isReverseInlineImm(TII, Src, ReverseImm)) {
MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
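
[Editor's note on this hunk: isReverseInlineImm asks whether the bit-reversed constant is inline-encodable; if so, the 32-bit literal is dropped and V_BFREV_B32 restores the original value. A hedged sketch of the underlying transform; reverseBits32 is an illustrative stand-in for llvm::reverseBits.]

// If reversing V's bits yields an inline-encodable constant K, then
//   v_mov_b32 dst, V   ->   v_bfrev_b32 dst, K
// saves the literal; e.g. V = 0x80000000 reverses to K = 1.
#include <cstdint>

static uint32_t reverseBits32(uint32_t V) {
  uint32_t R = 0;
  for (int I = 0; I < 32; ++I)
    R |= ((V >> I) & 1u) << (31 - I);
  return R;
}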
@@ -604,35 +637,6 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
}
}
- // Combine adjacent s_nops to use the immediate operand encoding how long
- // to wait.
- //
- // s_nop N
- // s_nop M
- // =>
- // s_nop (N + M)
- if (MI.getOpcode() == AMDGPU::S_NOP &&
- MI.getNumOperands() == 1 && // Don't merge with implicit operands
- Next != MBB.end() &&
- (*Next).getOpcode() == AMDGPU::S_NOP &&
- (*Next).getNumOperands() == 1) {
-
- MachineInstr &NextMI = *Next;
- // The instruction encodes the amount to wait with an offset of 1,
- // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
- // after adding.
- uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
- uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;
-
- // Make sure we don't overflow the bounds.
- if (Nop0 + Nop1 <= 8) {
- NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
- MI.eraseFromParent();
- }
-
- continue;
- }
-
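
[Editor's note on the deletion above: the removed block carried a useful encoding detail — s_nop's immediate encodes wait cycles minus one, and a merged wait must still fit the 8-cycle maximum. A hedged standalone sketch of the arithmetic it performed; e.g. merging s_nop 2 (3 cycles) with s_nop 3 (4 cycles) gives 7 cycles, re-encoded as s_nop 6.]

// Folding arithmetic from the deleted block, as a self-contained sketch.
#include <cstdint>
#include <optional>

static std::optional<uint8_t> mergeSNops(uint8_t ImmA, uint8_t ImmB) {
  unsigned Cycles = (ImmA + 1u) + (ImmB + 1u); // decode both waits
  if (Cycles > 8)
    return std::nullopt;                       // would overflow the field
  return uint8_t(Cycles - 1);                  // re-encode with the -1 bias
}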
// FIXME: We also need to consider movs of constant operands since
// immediate operands are not folded if they have more than one use, and
// the operand folding pass is unaware if the immediate will be free since
@@ -652,7 +656,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// FIXME: This could work better if hints worked with subregisters. If
// we have a vector add of a constant, we usually don't get the correct
// allocation due to the subregister usage.
- if (Register::isVirtualRegister(Dest->getReg()) && Src0->isReg()) {
+ if (Dest->getReg().isVirtual() && Src0->isReg()) {
MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
continue;
@@ -680,7 +684,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
const MachineOperand &Dst = MI.getOperand(0);
MachineOperand &Src = MI.getOperand(1);
- if (Src.isImm() && Register::isPhysicalRegister(Dst.getReg())) {
+ if (Src.isImm() && Dst.getReg().isPhysical()) {
int32_t ReverseImm;
if (isKImmOperand(TII, Src))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
@@ -729,7 +733,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (TII->isVOPC(Op32)) {
Register DstReg = MI.getOperand(0).getReg();
- if (Register::isVirtualRegister(DstReg)) {
+ if (DstReg.isVirtual()) {
// VOPC instructions can only write to the VCC register. We can't
// force them to use VCC here, because this is only one register and
// cannot deal with sequences which would require multiple copies of
@@ -753,7 +757,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (!Src2->isReg())
continue;
Register SReg = Src2->getReg();
- if (Register::isVirtualRegister(SReg)) {
+ if (SReg.isVirtual()) {
MRI.setRegAllocationHint(SReg, 0, VCCReg);
continue;
}
@@ -773,7 +777,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
bool Next = false;
if (SDst->getReg() != VCCReg) {
- if (Register::isVirtualRegister(SDst->getReg()))
+ if (SDst->getReg().isVirtual())
MRI.setRegAllocationHint(SDst->getReg(), 0, VCCReg);
Next = true;
}
@@ -781,7 +785,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
// All of the instructions with carry outs also have an SGPR input in
// src2.
if (Src2 && Src2->getReg() != VCCReg) {
- if (Register::isVirtualRegister(Src2->getReg()))
+ if (Src2->getReg().isVirtual())
MRI.setRegAllocationHint(Src2->getReg(), 0, VCCReg);
Next = true;
}
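
[Editor's note on the final hunks: the common thread is that the 32-bit VOP2 carry forms implicitly read and write VCC, so while an operand is still virtual the pass only records an allocation hint rather than rewriting the instruction. A hedged sketch of that pattern; isOrHintVCC is an illustrative name.]

// Hint a virtual register toward VCC; a physical operand must already be
// VCC for the 32-bit encoding to be usable. Sketch only.
#include "llvm/CodeGen/MachineRegisterInfo.h"

static bool isOrHintVCC(llvm::MachineRegisterInfo &MRI, llvm::Register Reg,
                        llvm::Register VCCReg) {
  if (Reg.isVirtual()) {
    MRI.setRegAllocationHint(Reg, 0, VCCReg);
    return false; // retry the shrink once allocation honors the hint
  }
  return Reg == VCCReg;
}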