about summary refs log tree commit diff
path: root/lib/Target/AMDGPU/GCNDPPCombine.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/AMDGPU/GCNDPPCombine.cpp')
-rw-r--r--  lib/Target/AMDGPU/GCNDPPCombine.cpp | 259
1 file changed, 159 insertions(+), 100 deletions(-)
diff --git a/lib/Target/AMDGPU/GCNDPPCombine.cpp b/lib/Target/AMDGPU/GCNDPPCombine.cpp
index 56071d0d2374..e1845e2e8e87 100644
--- a/lib/Target/AMDGPU/GCNDPPCombine.cpp
+++ b/lib/Target/AMDGPU/GCNDPPCombine.cpp
@@ -1,37 +1,40 @@
//=======- GCNDPPCombine.cpp - optimization for DPP instructions ---==========//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// The pass combines V_MOV_B32_dpp instruction with its VALU uses as a DPP src0
-// operand.If any of the use instruction cannot be combined with the mov the
+// operand. If any of the use instruction cannot be combined with the mov the
// whole sequence is reverted.
//
// $old = ...
// $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
-// dpp_controls..., $bound_ctrl
-// $res = VALU $dpp_value, ...
+// dpp_controls..., $row_mask, $bank_mask, $bound_ctrl
+// $res = VALU $dpp_value [, src1]
//
// to
//
-// $res = VALU_DPP $folded_old, $vgpr_to_be_read_from_other_lane, ...,
-// dpp_controls..., $folded_bound_ctrl
+// $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
+// dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules :
//
-// $bound_ctrl is DPP_BOUND_ZERO, $old is any
-// $bound_ctrl is DPP_BOUND_OFF, $old is 0
+// if $row_mask and $bank_mask are fully enabled (0xF) and
+// $bound_ctrl==DPP_BOUND_ZERO or $old==0
+// -> $combined_old = undef,
+// $combined_bound_ctrl = DPP_BOUND_ZERO
//
-// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_ZERO
-// $bound_ctrl is DPP_BOUND_OFF, $old is undef
+// if the VALU op is binary and
+// $bound_ctrl==DPP_BOUND_OFF and
+// $old==identity value (immediate) for the VALU op
+// -> $combined_old = src1,
+// $combined_bound_ctrl = DPP_BOUND_OFF
//
-// ->$folded_old = undef, $folded_bound_ctrl = DPP_BOUND_OFF
-// $bound_ctrl is DPP_BOUND_OFF, $old is foldable
+// Otherwise cancel.
//
-// ->$folded_old = folded value, $folded_bound_ctrl = DPP_BOUND_OFF
+// The mov_dpp instruction should reside in the same BB as all its uses
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
@@ -67,20 +70,16 @@ class GCNDPPCombine : public MachineFunctionPass {
MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;
- RegSubRegPair foldOldOpnd(MachineInstr &OrigMI,
- RegSubRegPair OldOpndVGPR,
- MachineOperand &OldOpndValue) const;
-
MachineInstr *createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
- RegSubRegPair OldOpndVGPR,
+ RegSubRegPair CombOldVGPR,
MachineOperand *OldOpnd,
- bool BoundCtrlZero) const;
+ bool CombBCZ) const;
MachineInstr *createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
- RegSubRegPair OldOpndVGPR,
- bool BoundCtrlZero) const;
+ RegSubRegPair CombOldVGPR,
+ bool CombBCZ) const;
bool hasNoImmOrEqual(MachineInstr &MI,
unsigned OpndName,
@@ -153,8 +152,8 @@ MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
- RegSubRegPair OldOpndVGPR,
- bool BoundCtrlZero) const {
+ RegSubRegPair CombOldVGPR,
+ bool CombBCZ) const {
assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);
assert(TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg() ==
TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)->getReg());
@@ -178,9 +177,15 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
if (OldIdx != -1) {
assert(OldIdx == NumOperands);
- assert(isOfRegClass(OldOpndVGPR, AMDGPU::VGPR_32RegClass, *MRI));
- DPPInst.addReg(OldOpndVGPR.Reg, 0, OldOpndVGPR.SubReg);
+ assert(isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI));
+ DPPInst.addReg(CombOldVGPR.Reg, 0, CombOldVGPR.SubReg);
++NumOperands;
+ } else {
+ // TODO: this discards MAC/FMA instructions for now, let's add it later
+ LLVM_DEBUG(dbgs() << " failed: no old operand in DPP instruction,"
+ " TBD\n");
+ Fail = true;
+ break;
}
if (auto *Mod0 = TII->getNamedOperand(OrigMI,
@@ -199,6 +204,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
break;
}
DPPInst.add(*Src0);
+ DPPInst->getOperand(NumOperands).setIsKill(false);
++NumOperands;
if (auto *Mod1 = TII->getNamedOperand(OrigMI,
@@ -231,7 +237,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
- DPPInst.addImm(BoundCtrlZero ? 1 : 0);
+ DPPInst.addImm(CombBCZ ? 1 : 0);
} while (false);
if (Fail) {
@@ -242,64 +248,81 @@ MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
return DPPInst.getInstr();
}
-GCNDPPCombine::RegSubRegPair
-GCNDPPCombine::foldOldOpnd(MachineInstr &OrigMI,
- RegSubRegPair OldOpndVGPR,
- MachineOperand &OldOpndValue) const {
- assert(OldOpndValue.isImm());
- switch (OrigMI.getOpcode()) {
+static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
+ assert(OldOpnd->isImm());
+ switch (OrigMIOp) {
default: break;
+ case AMDGPU::V_ADD_U32_e32:
+ case AMDGPU::V_ADD_U32_e64:
+ case AMDGPU::V_ADD_I32_e32:
+ case AMDGPU::V_ADD_I32_e64:
+ case AMDGPU::V_OR_B32_e32:
+ case AMDGPU::V_OR_B32_e64:
+ case AMDGPU::V_SUBREV_U32_e32:
+ case AMDGPU::V_SUBREV_U32_e64:
+ case AMDGPU::V_SUBREV_I32_e32:
+ case AMDGPU::V_SUBREV_I32_e64:
case AMDGPU::V_MAX_U32_e32:
- if (OldOpndValue.getImm() == std::numeric_limits<uint32_t>::max())
- return OldOpndVGPR;
+ case AMDGPU::V_MAX_U32_e64:
+ case AMDGPU::V_XOR_B32_e32:
+ case AMDGPU::V_XOR_B32_e64:
+ if (OldOpnd->getImm() == 0)
+ return true;
break;
- case AMDGPU::V_MAX_I32_e32:
- if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::max())
- return OldOpndVGPR;
+ case AMDGPU::V_AND_B32_e32:
+ case AMDGPU::V_AND_B32_e64:
+ case AMDGPU::V_MIN_U32_e32:
+ case AMDGPU::V_MIN_U32_e64:
+ if (static_cast<uint32_t>(OldOpnd->getImm()) ==
+ std::numeric_limits<uint32_t>::max())
+ return true;
break;
case AMDGPU::V_MIN_I32_e32:
- if (OldOpndValue.getImm() == std::numeric_limits<int32_t>::min())
- return OldOpndVGPR;
+ case AMDGPU::V_MIN_I32_e64:
+ if (static_cast<int32_t>(OldOpnd->getImm()) ==
+ std::numeric_limits<int32_t>::max())
+ return true;
+ break;
+ case AMDGPU::V_MAX_I32_e32:
+ case AMDGPU::V_MAX_I32_e64:
+ if (static_cast<int32_t>(OldOpnd->getImm()) ==
+ std::numeric_limits<int32_t>::min())
+ return true;
break;
-
case AMDGPU::V_MUL_I32_I24_e32:
+ case AMDGPU::V_MUL_I32_I24_e64:
case AMDGPU::V_MUL_U32_U24_e32:
- if (OldOpndValue.getImm() == 1) {
- auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
- assert(Src1 && Src1->isReg());
- return getRegSubRegPair(*Src1);
- }
+ case AMDGPU::V_MUL_U32_U24_e64:
+ if (OldOpnd->getImm() == 1)
+ return true;
break;
}
- return RegSubRegPair();
+ return false;
}
-// Cases to combine:
-// $bound_ctrl is DPP_BOUND_ZERO, $old is any
-// $bound_ctrl is DPP_BOUND_OFF, $old is 0
-// -> $old = undef, $bound_ctrl = DPP_BOUND_ZERO
-
-// $bound_ctrl is DPP_BOUND_OFF, $old is undef
-// -> $old = undef, $bound_ctrl = DPP_BOUND_OFF
-
-// $bound_ctrl is DPP_BOUND_OFF, $old is foldable
-// -> $old = folded value, $bound_ctrl = DPP_BOUND_OFF
-
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
MachineInstr &MovMI,
- RegSubRegPair OldOpndVGPR,
+ RegSubRegPair CombOldVGPR,
MachineOperand *OldOpndValue,
- bool BoundCtrlZero) const {
- assert(OldOpndVGPR.Reg);
- if (!BoundCtrlZero && OldOpndValue) {
- assert(OldOpndValue->isImm());
- OldOpndVGPR = foldOldOpnd(OrigMI, OldOpndVGPR, *OldOpndValue);
- if (!OldOpndVGPR.Reg) {
- LLVM_DEBUG(dbgs() << " failed: old immediate cannot be folded\n");
+ bool CombBCZ) const {
+ assert(CombOldVGPR.Reg);
+ if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
+ auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
+ if (!Src1 || !Src1->isReg()) {
+ LLVM_DEBUG(dbgs() << " failed: no src1 or it isn't a register\n");
+ return nullptr;
+ }
+ if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
+ LLVM_DEBUG(dbgs() << " failed: old immediate isn't an identity\n");
+ return nullptr;
+ }
+ CombOldVGPR = getRegSubRegPair(*Src1);
+ if (!isOfRegClass(CombOldVGPR, AMDGPU::VGPR_32RegClass, *MRI)) {
+ LLVM_DEBUG(dbgs() << " failed: src1 isn't a VGPR32 register\n");
return nullptr;
}
}
- return createDPPInst(OrigMI, MovMI, OldOpndVGPR, BoundCtrlZero);
+ return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ);
}
// returns true if MI doesn't have OpndName immediate operand or the
@@ -316,31 +339,64 @@ bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp);
+ LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);
+
+ auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
+ assert(DstOpnd && DstOpnd->isReg());
+ auto DPPMovReg = DstOpnd->getReg();
+ if (execMayBeModifiedBeforeAnyUse(*MRI, DPPMovReg, MovMI)) {
+ LLVM_DEBUG(dbgs() << " failed: EXEC mask should remain the same"
+ " for all uses\n");
+ return false;
+ }
+
+ auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
+ assert(RowMaskOpnd && RowMaskOpnd->isImm());
+ auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
+ assert(BankMaskOpnd && BankMaskOpnd->isImm());
+ const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF &&
+ BankMaskOpnd->getImm() == 0xF;
+
auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
assert(BCZOpnd && BCZOpnd->isImm());
- bool BoundCtrlZero = 0 != BCZOpnd->getImm();
-
- LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);
+ bool BoundCtrlZero = BCZOpnd->getImm();
auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
assert(OldOpnd && OldOpnd->isReg());
- auto OldOpndVGPR = getRegSubRegPair(*OldOpnd);
- auto *OldOpndValue = getOldOpndValue(*OldOpnd);
+
+ auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
+ // OldOpndValue is either undef (IMPLICIT_DEF) or immediate or something else
+ // We could use: assert(!OldOpndValue || OldOpndValue->isImm())
+ // but the third option is used to distinguish undef from non-immediate
+ // to reuse IMPLICIT_DEF instruction later
assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);
- if (OldOpndValue) {
- if (BoundCtrlZero) {
- OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef, ignore old opnd
- OldOpndValue = nullptr;
- } else {
- if (!OldOpndValue->isImm()) {
- LLVM_DEBUG(dbgs() << " failed: old operand isn't an imm or undef\n");
- return false;
- }
- if (OldOpndValue->getImm() == 0) {
- OldOpndVGPR.Reg = AMDGPU::NoRegister; // should be undef
- OldOpndValue = nullptr;
- BoundCtrlZero = true;
+
+ bool CombBCZ = false;
+
+ if (MaskAllLanes && BoundCtrlZero) { // [1]
+ CombBCZ = true;
+ } else {
+ if (!OldOpndValue || !OldOpndValue->isImm()) {
+ LLVM_DEBUG(dbgs() << " failed: the DPP mov isn't combinable\n");
+ return false;
+ }
+
+ if (OldOpndValue->getParent()->getParent() != MovMI.getParent()) {
+ LLVM_DEBUG(dbgs() <<
+ " failed: old reg def and mov should be in the same BB\n");
+ return false;
+ }
+
+ if (OldOpndValue->getImm() == 0) {
+ if (MaskAllLanes) {
+ assert(!BoundCtrlZero); // by check [1]
+ CombBCZ = true;
}
+ } else if (BoundCtrlZero) {
+ assert(!MaskAllLanes); // by check [1]
+ LLVM_DEBUG(dbgs() <<
+ " failed: old!=0 and bctrl:0 and not all lanes isn't combinable\n");
+ return false;
}
}
@@ -348,25 +404,28 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
if (!OldOpndValue)
dbgs() << "undef";
else
- dbgs() << OldOpndValue->getImm();
- dbgs() << ", bound_ctrl=" << BoundCtrlZero << '\n');
-
- std::vector<MachineInstr*> OrigMIs, DPPMIs;
- if (!OldOpndVGPR.Reg) { // OldOpndVGPR = undef
- OldOpndVGPR = RegSubRegPair(
+ dbgs() << *OldOpndValue;
+ dbgs() << ", bound_ctrl=" << CombBCZ << '\n');
+
+ SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
+ auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
+ // try to reuse previous old reg if its undefined (IMPLICIT_DEF)
+ if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
+ CombOldVGPR = RegSubRegPair(
MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass));
auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
- TII->get(AMDGPU::IMPLICIT_DEF), OldOpndVGPR.Reg);
+ TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
DPPMIs.push_back(UndefInst.getInstr());
}
OrigMIs.push_back(&MovMI);
bool Rollback = true;
- for (auto &Use : MRI->use_nodbg_operands(
- TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg())) {
+ for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
Rollback = true;
auto &OrigMI = *Use.getParent();
+ LLVM_DEBUG(dbgs() << " try: " << OrigMI);
+
auto OrigOp = OrigMI.getOpcode();
if (TII->isVOP3(OrigOp)) {
if (!TII->hasVALU32BitEncoding(OrigOp)) {
@@ -389,8 +448,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
LLVM_DEBUG(dbgs() << " combining: " << OrigMI);
if (&Use == TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0)) {
- if (auto *DPPInst = createDPPInst(OrigMI, MovMI, OldOpndVGPR,
- OldOpndValue, BoundCtrlZero)) {
+ if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
+ OldOpndValue, CombBCZ)) {
DPPMIs.push_back(DPPInst);
Rollback = false;
}
@@ -401,8 +460,8 @@ bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
BB->insert(OrigMI, NewMI);
if (TII->commuteInstruction(*NewMI)) {
LLVM_DEBUG(dbgs() << " commuted: " << *NewMI);
- if (auto *DPPInst = createDPPInst(*NewMI, MovMI, OldOpndVGPR,
- OldOpndValue, BoundCtrlZero)) {
+ if (auto *DPPInst = createDPPInst(*NewMI, MovMI, CombOldVGPR,
+ OldOpndValue, CombBCZ)) {
DPPMIs.push_back(DPPInst);
Rollback = false;
}