diff options
author | Dimitry Andric <dim@FreeBSD.org> | 2020-01-17 20:45:01 +0000 |
---|---|---|
committer | Dimitry Andric <dim@FreeBSD.org> | 2020-01-17 20:45:01 +0000 |
commit | 706b4fc47bbc608932d3b491ae19a3b9cde9497b (patch) | |
tree | 4adf86a776049cbf7f69a1929c4babcbbef925eb /llvm/lib/Target/Hexagon | |
parent | 7cc9cf2bf09f069cb2dd947ead05d0b54301fb71 (diff) |
Vendor import of llvm-project master e26a78e70, the last commit before
the llvmorg-11-init tag, from which release/10.x was branched.
(tag: vendor/llvm-project/llvmorg-10-init-17466-ge26a78e7085)
Diffstat (limited to 'llvm/lib/Target/Hexagon')
50 files changed, 481 insertions, 254 deletions
diff --git a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp index 590c4a2eb69d..cee1954e369b 100644 --- a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp +++ b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp @@ -813,10 +813,10 @@ bool HexagonAsmParser::RegisterMatchesArch(unsigned MatchNum) const { return true; } -// extern "C" void LLVMInitializeHexagonAsmLexer(); +// extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonAsmLexer(); /// Force static initialization. -extern "C" void LLVMInitializeHexagonAsmParser() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonAsmParser() { RegisterMCAsmParser<HexagonAsmParser> X(getTheHexagonTarget()); } diff --git a/llvm/lib/Target/Hexagon/BitTracker.cpp b/llvm/lib/Target/Hexagon/BitTracker.cpp index efd5ed915127..8a07b991ff5a 100644 --- a/llvm/lib/Target/Hexagon/BitTracker.cpp +++ b/llvm/lib/Target/Hexagon/BitTracker.cpp @@ -860,7 +860,7 @@ void BT::visitNonBranch(const MachineInstr &MI) { << " cell: " << ME.getCell(RU, Map) << "\n"; } dbgs() << "Outputs:\n"; - for (const std::pair<unsigned, RegisterCell> &P : ResMap) { + for (const std::pair<const unsigned, RegisterCell> &P : ResMap) { RegisterRef RD(P.first); dbgs() << " " << printReg(P.first, &ME.TRI) << " cell: " << ME.getCell(RD, ResMap) << "\n"; diff --git a/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp index 99e3ee871570..7a90d585eb9a 100644 --- a/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp +++ b/llvm/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp @@ -53,11 +53,9 @@ public: DecodeStatus getSingleInstruction(MCInst &Instr, MCInst &MCB, ArrayRef<uint8_t> Bytes, uint64_t Address, - raw_ostream &VStream, raw_ostream &CStream, - bool &Complete) const; + raw_ostream &CStream, bool &Complete) const; DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, 
ArrayRef<uint8_t> Bytes, uint64_t Address, - raw_ostream &VStream, raw_ostream &CStream) const override; void remapInstruction(MCInst &Instr) const; }; @@ -158,7 +156,7 @@ static MCDisassembler *createHexagonDisassembler(const Target &T, return new HexagonDisassembler(STI, Ctx, T.createMCInstrInfo()); } -extern "C" void LLVMInitializeHexagonDisassembler() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonDisassembler() { TargetRegistry::RegisterMCDisassembler(getTheHexagonTarget(), createHexagonDisassembler); } @@ -166,7 +164,6 @@ extern "C" void LLVMInitializeHexagonDisassembler() { DecodeStatus HexagonDisassembler::getInstruction(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t Address, - raw_ostream &os, raw_ostream &cs) const { DecodeStatus Result = DecodeStatus::Success; bool Complete = false; @@ -179,7 +176,7 @@ DecodeStatus HexagonDisassembler::getInstruction(MCInst &MI, uint64_t &Size, if (Bytes.size() < HEXAGON_INSTR_SIZE) return MCDisassembler::Fail; MCInst *Inst = new (getContext()) MCInst; - Result = getSingleInstruction(*Inst, MI, Bytes, Address, os, cs, Complete); + Result = getSingleInstruction(*Inst, MI, Bytes, Address, cs, Complete); MI.addOperand(MCOperand::createInst(Inst)); Size += HEXAGON_INSTR_SIZE; Bytes = Bytes.slice(HEXAGON_INSTR_SIZE); @@ -290,9 +287,11 @@ static void adjustDuplex(MCInst &MI, MCContext &Context) { } } -DecodeStatus HexagonDisassembler::getSingleInstruction( - MCInst &MI, MCInst &MCB, ArrayRef<uint8_t> Bytes, uint64_t Address, - raw_ostream &os, raw_ostream &cs, bool &Complete) const { +DecodeStatus HexagonDisassembler::getSingleInstruction(MCInst &MI, MCInst &MCB, + ArrayRef<uint8_t> Bytes, + uint64_t Address, + raw_ostream &cs, + bool &Complete) const { assert(Bytes.size() >= HEXAGON_INSTR_SIZE); uint32_t Instruction = support::endian::read32le(Bytes.data()); diff --git a/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp index 
3d771d388e28..30fdde70d01a 100644 --- a/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/llvm/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -771,6 +771,6 @@ void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer->EmitInstruction(MCB, getSubtargetInfo()); } -extern "C" void LLVMInitializeHexagonAsmPrinter() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonAsmPrinter() { RegisterAsmPrinter<HexagonAsmPrinter> X(getTheHexagonTarget()); } diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp index 3068fb6f9629..799b85ed48b4 100644 --- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -27,6 +27,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" diff --git a/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp b/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp index cf1b0a0f7daa..6d2aadb066cf 100644 --- a/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp +++ b/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp @@ -6,17 +6,14 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "commgep" - #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/GraphTraits.h" -#include "llvm/ADT/SetVector.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/PostDominators.h" -#include "llvm/Transforms/Utils/Local.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" @@ -30,6 +27,7 @@ #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/IR/Verifier.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include 
"llvm/Support/Allocator.h" #include "llvm/Support/Casting.h" @@ -37,6 +35,7 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <cassert> #include <cstddef> @@ -47,6 +46,8 @@ #include <utility> #include <vector> +#define DEBUG_TYPE "commgep" + using namespace llvm; static cl::opt<bool> OptSpeculate("commgep-speculate", cl::init(true), diff --git a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp index ddc9b847ef1c..aa9a715718bf 100644 --- a/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp +++ b/llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp @@ -15,6 +15,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Register.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" @@ -554,7 +555,7 @@ namespace { LLVM_ATTRIBUTE_UNUSED raw_ostream &operator<< (raw_ostream &OS, const PrintIMap &P) { OS << "{\n"; - for (const std::pair<HCE::ExtenderInit,HCE::IndexList> &Q : P.IMap) { + for (const std::pair<const HCE::ExtenderInit, HCE::IndexList> &Q : P.IMap) { OS << " " << PrintInit(Q.first, P.HRI) << " -> {"; for (unsigned I : Q.second) OS << ' ' << I; @@ -1638,7 +1639,7 @@ bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) { return true; } - if ((MI.mayLoad() || MI.mayStore()) && !isStoreImmediate(ExtOpc)) { + if (MI.mayLoadOrStore() && !isStoreImmediate(ExtOpc)) { // For memory instructions, there is an asymmetry in the addressing // modes. 
Addressing modes allowing extenders can be replaced with // addressing modes that use registers, but the order of operands @@ -1793,7 +1794,7 @@ bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI, return true; } - if (MI.mayLoad() || MI.mayStore()) { + if (MI.mayLoadOrStore()) { unsigned IdxOpc = getRegOffOpcode(ExtOpc); assert(IdxOpc && "Expecting indexed opcode"); MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(IdxOpc)); @@ -1843,7 +1844,7 @@ bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) { // These two addressing modes must be converted into indexed forms // regardless of what the initializer looks like. bool IsAbs = false, IsAbsSet = false; - if (MI.mayLoad() || MI.mayStore()) { + if (MI.mayLoadOrStore()) { unsigned AM = HII->getAddrMode(MI); IsAbs = AM == HexagonII::Absolute; IsAbsSet = AM == HexagonII::AbsoluteSet; @@ -1894,7 +1895,7 @@ bool HCE::replaceExtenders(const AssignmentMap &IMap) { LocDefList Defs; bool Changed = false; - for (const std::pair<ExtenderInit,IndexList> &P : IMap) { + for (const std::pair<const ExtenderInit, IndexList> &P : IMap) { const IndexList &Idxs = P.second; if (Idxs.size() < CountThreshold) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp index a82501cabb9b..5b61d1084e08 100644 --- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -134,11 +134,21 @@ namespace { uint32_t properties() const; unsigned size() const { return Size; } - LatticeCell &operator= (const LatticeCell &L) { + LatticeCell(const LatticeCell &L) { + // This memcpy also copies Properties (when L.Size == 0). + uint32_t N = + L.IsSpecial ? 
sizeof L.Properties : L.Size * sizeof(const Constant *); + memcpy(Values, L.Values, N); + Kind = L.Kind; + Size = L.Size; + IsSpecial = L.IsSpecial; + } + + LatticeCell &operator=(const LatticeCell &L) { if (this != &L) { // This memcpy also copies Properties (when L.Size == 0). uint32_t N = L.IsSpecial ? sizeof L.Properties - : L.Size*sizeof(const Constant*); + : L.Size * sizeof(const Constant *); memcpy(Values, L.Values, N); Kind = L.Kind; Size = L.Size; @@ -260,7 +270,7 @@ namespace { void propagate(MachineFunction &MF); bool rewrite(MachineFunction &MF); - MachineRegisterInfo *MRI; + MachineRegisterInfo *MRI = nullptr; MachineConstEvaluator &MCE; using CFGEdge = std::pair<unsigned, unsigned>; diff --git a/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td b/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td index e4a2ba0ec29c..61a1df5eb94b 100644 --- a/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td +++ b/llvm/lib/Target/Hexagon/HexagonDepMapAsm2Intrin.td @@ -1,4 +1,4 @@ -//===----------------------------------------------------------------------===// +//===-------------------------------------------------------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp index 0844fb8a8629..d0285a7aa377 100644 --- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -217,7 +217,7 @@ namespace { MachineDominatorTree *MDT = nullptr; MachineLoopInfo *MLI = nullptr; BlockSetType Deleted; - const MachineBranchProbabilityInfo *MBPI; + const MachineBranchProbabilityInfo *MBPI = nullptr; }; } // end anonymous namespace @@ -282,6 +282,7 @@ bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B, // can fall through into the other, in other words, it will be executed // in both cases. 
We only want to predicate the block that is executed // conditionally. + assert(TB && FB && "Failed to find triangle control flow blocks"); unsigned TNP = TB->pred_size(), FNP = FB->pred_size(); unsigned TNS = TB->succ_size(), FNS = FB->succ_size(); @@ -682,7 +683,7 @@ bool HexagonEarlyIfConversion::isPredicableStore(const MachineInstr *MI) bool HexagonEarlyIfConversion::isSafeToSpeculate(const MachineInstr *MI) const { - if (MI->mayLoad() || MI->mayStore()) + if (MI->mayLoadOrStore()) return false; if (MI->isCall() || MI->isBarrier() || MI->isBranch()) return false; diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 8984ee82960d..c1d0599830cc 100644 --- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -106,6 +106,7 @@ #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/Function.h" +#include "llvm/InitializePasses.h" #include "llvm/MC/LaneBitmask.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" @@ -1040,7 +1041,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond, bool CanDown = canMoveOver(*DefI, Defs, Uses); // The TfrI does not access memory, but DefI could. Check if it's safe // to move DefI down to TfrI. 
- if (DefI->mayLoad() || DefI->mayStore()) + if (DefI->mayLoadOrStore()) if (!canMoveMemTo(*DefI, TfrI, true)) CanDown = false; diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp index bfa3372d7faf..aff8e57b0a94 100644 --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -36,6 +36,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachinePostDominators.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/PseudoSourceValue.h" #include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/Attributes.h" @@ -223,8 +224,7 @@ namespace { bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) { auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering(); - bool NeedCFI = MF.getMMI().hasDebugInfo() || - MF.getFunction().needsUnwindTableEntry(); + bool NeedCFI = MF.needsFrameMoves(); if (!NeedCFI) return false; @@ -1363,6 +1363,7 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized( if (!HasAlloca || !NeedsAlign) return; + SmallSet<int, 4> DealignSlots; unsigned LFS = MFI.getLocalFrameSize(); for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { if (!MFI.isSpillSlotObjectIndex(i) || MFI.isDeadObjectIndex(i)) @@ -1373,7 +1374,8 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized( unsigned A = std::max(MFI.getObjectAlignment(i), 8U); MFI.setObjectAlignment(i, 8); LFS = alignTo(LFS+S, A); - MFI.mapLocalFrameObject(i, -LFS); + MFI.mapLocalFrameObject(i, -static_cast<int64_t>(LFS)); + DealignSlots.insert(i); } MFI.setLocalFrameSize(LFS); @@ -1383,6 +1385,38 @@ void HexagonFrameLowering::processFunctionBeforeFrameFinalized( MFI.setLocalFrameMaxAlign(Align(8)); MFI.setUseLocalStackAllocationBlock(true); + // Go over all MachineMemOperands in the code, and change the ones that + // refer to the dealigned stack slots to 
reflect the new alignment. + if (!DealignSlots.empty()) { + for (MachineBasicBlock &BB : MF) { + for (MachineInstr &MI : BB) { + bool KeepOld = true; + ArrayRef<MachineMemOperand*> memops = MI.memoperands(); + SmallVector<MachineMemOperand*,1> new_memops; + for (MachineMemOperand *MMO : memops) { + auto *PV = MMO->getPseudoValue(); + if (auto *FS = dyn_cast_or_null<FixedStackPseudoSourceValue>(PV)) { + int FI = FS->getFrameIndex(); + if (DealignSlots.count(FI)) { + unsigned A = MFI.getObjectAlignment(FI); + auto *NewMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), + MMO->getFlags(), MMO->getSize(), A, + MMO->getAAInfo(), MMO->getRanges(), + MMO->getSyncScopeID(), MMO->getOrdering(), + MMO->getFailureOrdering()); + new_memops.push_back(NewMMO); + KeepOld = false; + continue; + } + } + new_memops.push_back(MMO); + } + if (!KeepOld) + MI.setMemRefs(MF, new_memops); + } + } + } + // Set the physical aligned-stack base address register. unsigned AP = 0; if (const MachineInstr *AI = getAlignaInstr(MF)) @@ -1750,16 +1784,21 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B, Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi); bool IsKill = MI->getOperand(2).isKill(); int FI = MI->getOperand(0).getIndex(); + bool NeedsAligna = needsAligna(MF); unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass); unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned StoreOpc; + auto UseAligned = [&] (unsigned NeedAlign, unsigned HasAlign) { + return !NeedsAligna && (NeedAlign <= HasAlign); + }; + // Store low part. if (LPR.contains(SrcLo)) { - StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai - : Hexagon::V6_vS32Ub_ai; + StoreOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vS32b_ai + : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) .addFrameIndex(FI) .addImm(0) @@ -1769,8 +1808,8 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B, // Store high part. 
if (LPR.contains(SrcHi)) { - StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai - : Hexagon::V6_vS32Ub_ai; + StoreOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vS32b_ai + : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) .addFrameIndex(FI) .addImm(Size) @@ -1797,23 +1836,28 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B, Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi); Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo); int FI = MI->getOperand(1).getIndex(); + bool NeedsAligna = needsAligna(MF); unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass); unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned LoadOpc; + auto UseAligned = [&] (unsigned NeedAlign, unsigned HasAlign) { + return !NeedsAligna && (NeedAlign <= HasAlign); + }; + // Load low part. - LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai - : Hexagon::V6_vL32Ub_ai; + LoadOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vL32b_ai + : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstLo) .addFrameIndex(FI) .addImm(0) .cloneMemRefs(*MI); // Load high part. - LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai - : Hexagon::V6_vL32Ub_ai; + LoadOpc = UseAligned(NeedAlign, HasAlign) ? 
Hexagon::V6_vL32b_ai + : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstHi) .addFrameIndex(FI) .addImm(Size) @@ -1832,6 +1876,7 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B, if (!MI->getOperand(0).isFI()) return false; + bool NeedsAligna = needsAligna(MF); auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); DebugLoc DL = MI->getDebugLoc(); Register SrcR = MI->getOperand(2).getReg(); @@ -1840,8 +1885,9 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B, unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); unsigned HasAlign = MFI.getObjectAlignment(FI); - unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai - : Hexagon::V6_vS32Ub_ai; + bool UseAligned = !NeedsAligna && (NeedAlign <= HasAlign); + unsigned StoreOpc = UseAligned ? Hexagon::V6_vS32b_ai + : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) .addFrameIndex(FI) .addImm(0) @@ -1861,6 +1907,7 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B, if (!MI->getOperand(1).isFI()) return false; + bool NeedsAligna = needsAligna(MF); auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); DebugLoc DL = MI->getDebugLoc(); Register DstR = MI->getOperand(0).getReg(); @@ -1868,8 +1915,9 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B, unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); unsigned HasAlign = MFI.getObjectAlignment(FI); - unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai - : Hexagon::V6_vL32Ub_ai; + bool UseAligned = !NeedsAligna && (NeedAlign <= HasAlign); + unsigned LoadOpc = UseAligned ? 
Hexagon::V6_vL32b_ai + : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstR) .addFrameIndex(FI) .addImm(0) @@ -1912,11 +1960,9 @@ bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF, Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs); break; case Hexagon::PS_vloadrw_ai: - case Hexagon::PS_vloadrwu_ai: Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs); break; case Hexagon::PS_vstorerw_ai: - case Hexagon::PS_vstorerwu_ai: Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs); break; } @@ -1961,7 +2007,15 @@ void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF, for (auto *RC : SpillRCs) { if (!needToReserveScavengingSpillSlots(MF, HRI, RC)) continue; - unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1; + unsigned Num = 1; + switch (RC->getID()) { + case Hexagon::IntRegsRegClassID: + Num = NumberScavengerSlots; + break; + case Hexagon::HvxQRRegClassID: + Num = 2; // Vector predicate spills also need a vector register. + break; + } unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC); for (unsigned i = 0; i < Num; i++) { int NewFI = MFI.CreateSpillStackObject(S, A); @@ -2389,9 +2443,9 @@ bool HexagonFrameLowering::needsAligna(const MachineFunction &MF) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); if (!MFI.hasVarSizedObjects()) return false; - unsigned MaxA = MFI.getMaxAlignment(); - if (MaxA <= getStackAlignment()) - return false; + // Do not check for max stack object alignment here, because the stack + // may not be complete yet. Assume that we will need PS_aligna if there + // are variable-sized objects. 
return true; } diff --git a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp index caa0e4d80397..342ca21525c5 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp @@ -17,9 +17,11 @@ #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include <algorithm> diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp index 48881e02f4d3..2f29e88bc989 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -29,6 +29,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" @@ -92,6 +93,10 @@ namespace { RegisterSet() = default; explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {} RegisterSet(const RegisterSet &RS) : BitVector(RS) {} + RegisterSet &operator=(const RegisterSet &RS) { + BitVector::operator=(RS); + return *this; + } using BitVector::clear; diff --git a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp index b559e7bbbb60..9585b14dbf80 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp @@ -332,6 +332,12 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) { unsigned MxOpc = getMuxOpcode(*MX.SrcT, *MX.SrcF); if (!MxOpc) continue; + // Basic sanity check: since we are deleting instructions, validate the + // iterators. 
There is a possibility that one of Def1 or Def2 is translated + // to "mux" and being considered for other "mux" instructions. + if (!MX.At->getParent() || !MX.Def1->getParent() || !MX.Def2->getParent()) + continue; + MachineBasicBlock &B = *MX.At->getParent(); const DebugLoc &DL = B.findDebugLoc(MX.At); auto NewMux = BuildMI(B, MX.At, DL, HII->get(MxOpc), MX.DefR) @@ -339,8 +345,8 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) { .add(*MX.SrcT) .add(*MX.SrcF); NewMux->clearKillInfo(); - B.erase(MX.Def1); - B.erase(MX.Def2); + B.remove(MX.Def1); + B.remove(MX.Def2); Changed = true; } diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp index 24d33c91a29b..903287e68c99 100644 --- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp +++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -20,6 +20,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 62291790f0fe..1cf1500bc832 100644 --- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -44,6 +44,7 @@ #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 4684d8e4781a..9cf5b257a00a 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -10,8 +10,8 @@ // 
//===----------------------------------------------------------------------===// -#include "Hexagon.h" #include "HexagonISelDAGToDAG.h" +#include "Hexagon.h" #include "HexagonISelLowering.h" #include "HexagonMachineFunctionInfo.h" #include "HexagonTargetMachine.h" @@ -19,6 +19,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" using namespace llvm; @@ -915,7 +916,6 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, switch (ConstraintID) { default: return true; - case InlineAsm::Constraint_i: case InlineAsm::Constraint_o: // Offsetable. case InlineAsm::Constraint_v: // Not offsetable. case InlineAsm::Constraint_m: // Memory. @@ -1261,7 +1261,7 @@ void HexagonDAGToDAGISel::PreprocessISelDAG() { } void HexagonDAGToDAGISel::EmitFunctionEntryCode() { - auto &HST = static_cast<const HexagonSubtarget&>(MF->getSubtarget()); + auto &HST = MF->getSubtarget<HexagonSubtarget>(); auto &HFI = *HST.getFrameLowering(); if (!HFI.needsAligna(*MF)) return; @@ -1269,12 +1269,23 @@ void HexagonDAGToDAGISel::EmitFunctionEntryCode() { MachineFrameInfo &MFI = MF->getFrameInfo(); MachineBasicBlock *EntryBB = &MF->front(); unsigned AR = FuncInfo->CreateReg(MVT::i32); - unsigned MaxA = MFI.getMaxAlignment(); + unsigned EntryMaxA = MFI.getMaxAlignment(); BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR) - .addImm(MaxA); + .addImm(EntryMaxA); MF->getInfo<HexagonMachineFunctionInfo>()->setStackAlignBaseVReg(AR); } +void HexagonDAGToDAGISel::updateAligna() { + auto &HFI = *MF->getSubtarget<HexagonSubtarget>().getFrameLowering(); + if (!HFI.needsAligna(*MF)) + return; + auto *AlignaI = const_cast<MachineInstr*>(HFI.getAlignaInstr(*MF)); + assert(AlignaI != nullptr); + unsigned MaxA = MF->getFrameInfo().getMaxAlignment(); + if (AlignaI->getOperand(1).getImm() < MaxA) + 
AlignaI->getOperand(1).setImm(MaxA); +} + // Match a frame index that can be used in an addressing mode. bool HexagonDAGToDAGISel::SelectAddrFI(SDValue &N, SDValue &R) { if (N.getOpcode() != ISD::FrameIndex) diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h index 65edb09603b3..6c77d8803359 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.h @@ -43,6 +43,7 @@ public: HII = HST->getInstrInfo(); HRI = HST->getRegisterInfo(); SelectionDAGISel::runOnMachineFunction(MF); + updateAligna(); return true; } @@ -144,6 +145,9 @@ private: void ppAddrRewriteAndSrl(std::vector<SDNode*> &&Nodes); void ppHoistZextI1(std::vector<SDNode*> &&Nodes); + // Function postprocessing. + void updateAligna(); + SmallDenseMap<SDNode *,int> RootWeights; SmallDenseMap<SDNode *,int> RootHeights; SmallDenseMap<const Value *,int> GAUsesInFunction; diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp index e7f1c345af1d..7e143a349400 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAGHVX.cpp @@ -14,6 +14,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index 8a8986e232a0..e11ecdc7d035 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -39,8 +39,9 @@ #include "llvm/IR/GlobalValue.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Instructions.h" -#include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include 
"llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" @@ -232,19 +233,76 @@ HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { // If either no tail call or told not to tail call at all, don't. - auto Attr = - CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); - if (!CI->isTailCall() || Attr.getValueAsString() == "true") - return false; - - return true; + return CI->isTailCall(); } -Register HexagonTargetLowering::getRegisterByName(const char* RegName, EVT VT, - const MachineFunction &) const { +Register HexagonTargetLowering::getRegisterByName( + const char* RegName, LLT VT, const MachineFunction &) const { // Just support r19, the linux kernel uses it. Register Reg = StringSwitch<Register>(RegName) + .Case("r0", Hexagon::R0) + .Case("r1", Hexagon::R1) + .Case("r2", Hexagon::R2) + .Case("r3", Hexagon::R3) + .Case("r4", Hexagon::R4) + .Case("r5", Hexagon::R5) + .Case("r6", Hexagon::R6) + .Case("r7", Hexagon::R7) + .Case("r8", Hexagon::R8) + .Case("r9", Hexagon::R9) + .Case("r10", Hexagon::R10) + .Case("r11", Hexagon::R11) + .Case("r12", Hexagon::R12) + .Case("r13", Hexagon::R13) + .Case("r14", Hexagon::R14) + .Case("r15", Hexagon::R15) + .Case("r16", Hexagon::R16) + .Case("r17", Hexagon::R17) + .Case("r18", Hexagon::R18) .Case("r19", Hexagon::R19) + .Case("r20", Hexagon::R20) + .Case("r21", Hexagon::R21) + .Case("r22", Hexagon::R22) + .Case("r23", Hexagon::R23) + .Case("r24", Hexagon::R24) + .Case("r25", Hexagon::R25) + .Case("r26", Hexagon::R26) + .Case("r27", Hexagon::R27) + .Case("r28", Hexagon::R28) + .Case("r29", Hexagon::R29) + .Case("r30", Hexagon::R30) + .Case("r31", Hexagon::R31) + .Case("r1:0", Hexagon::D0) + .Case("r3:2", Hexagon::D1) + .Case("r5:4", Hexagon::D2) + .Case("r7:6", Hexagon::D3) + .Case("r9:8", Hexagon::D4) + .Case("r11:10", Hexagon::D5) + .Case("r13:12", Hexagon::D6) + .Case("r15:14", Hexagon::D7) + 
.Case("r17:16", Hexagon::D8) + .Case("r19:18", Hexagon::D9) + .Case("r21:20", Hexagon::D10) + .Case("r23:22", Hexagon::D11) + .Case("r25:24", Hexagon::D12) + .Case("r27:26", Hexagon::D13) + .Case("r29:28", Hexagon::D14) + .Case("r31:30", Hexagon::D15) + .Case("sp", Hexagon::R29) + .Case("fp", Hexagon::R30) + .Case("lr", Hexagon::R31) + .Case("p0", Hexagon::P0) + .Case("p1", Hexagon::P1) + .Case("p2", Hexagon::P2) + .Case("p3", Hexagon::P3) + .Case("sa0", Hexagon::SA0) + .Case("lc0", Hexagon::LC0) + .Case("sa1", Hexagon::SA1) + .Case("lc1", Hexagon::LC1) + .Case("m0", Hexagon::M0) + .Case("m1", Hexagon::M1) + .Case("usr", Hexagon::USR) + .Case("ugp", Hexagon::UGP) .Default(Register()); if (Reg) return Reg; @@ -345,10 +403,6 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, else CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon); - auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); - if (Attr.getValueAsString() == "true") - CLI.IsTailCall = false; - if (CLI.IsTailCall) { bool StructAttrFlag = MF.getFunction().hasStructRetAttr(); CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, @@ -413,7 +467,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr); if (ArgAlign) LargestAlignSeen = std::max(LargestAlignSeen, - VA.getLocVT().getStoreSizeInBits() >> 3); + (unsigned)VA.getLocVT().getStoreSizeInBits() >> 3); if (Flags.isByVal()) { // The argument is a struct passed by value. According to LLVM, "Arg" // is a pointer. 
@@ -1473,6 +1527,10 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM, setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); + // Types natively supported: for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) { @@ -1847,7 +1905,8 @@ bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32; } -bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { +bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd( + const MachineFunction &MF, EVT VT) const { return isOperationLegalOrCustom(ISD::FMA, VT); } diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h index 75f553bfec7f..e79646de6287 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h @@ -137,7 +137,8 @@ namespace HexagonISD { /// instructions. fmuladd intrinsics will be expanded to FMAs when this /// method returns true (and FMAs are legal), otherwise fmuladd is /// expanded to mul + add. - bool isFMAFasterThanFMulAndFAdd(EVT) const override; + bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, + EVT) const override; // Should we expand the build vector with shuffles? 
bool shouldExpandBuildVectorWithShuffles(EVT VT, @@ -229,7 +230,7 @@ namespace HexagonISD { bool mayBeEmittedAsTailCall(const CallInst *CI) const override; - Register getRegisterByName(const char* RegName, EVT VT, + Register getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const override; /// If a physical register, this returns the register that receives the diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp index bc8a9959c917..204950f9010e 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -194,6 +194,13 @@ HexagonTargetLowering::initializeHVXLowering() { setOperationAction(ISD::XOR, BoolV, Legal); } + if (Use64b) + for (MVT T: {MVT::v32i8, MVT::v32i16, MVT::v16i8, MVT::v16i16, MVT::v16i32}) + setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); + else + for (MVT T: {MVT::v64i8, MVT::v64i16, MVT::v32i8, MVT::v32i16, MVT::v32i32}) + setOperationAction(ISD::SIGN_EXTEND_INREG, T, Legal); + setTargetDAGCombine(ISD::VSELECT); } diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp index 767538f92ed6..39ec8936214e 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -786,8 +786,8 @@ bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB, void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - const DebugLoc &DL, unsigned DestReg, - unsigned SrcReg, bool KillSrc) const { + const DebugLoc &DL, MCRegister DestReg, + MCRegister SrcReg, bool KillSrc) const { const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); unsigned KillFlag = getKillRegState(KillSrc); @@ -888,10 +888,7 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); unsigned SlotAlign 
= MFI.getObjectAlignment(FI); - unsigned RegAlign = TRI->getSpillAlignment(*RC); unsigned KillFlag = getKillRegState(isKill); - bool HasAlloca = MFI.hasVarSizedObjects(); - const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, @@ -918,29 +915,13 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, .addFrameIndex(FI).addImm(0) .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) { - // If there are variable-sized objects, spills will not be aligned. - if (HasAlloca) - SlotAlign = HFI.getStackAlignment(); - unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vS32Ub_ai - : Hexagon::V6_vS32b_ai; - MachineMemOperand *MMOA = MF.getMachineMemOperand( - MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), SlotAlign); - BuildMI(MBB, I, DL, get(Opc)) + BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerv_ai)) .addFrameIndex(FI).addImm(0) - .addReg(SrcReg, KillFlag).addMemOperand(MMOA); + .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) { - // If there are variable-sized objects, spills will not be aligned. - if (HasAlloca) - SlotAlign = HFI.getStackAlignment(); - unsigned Opc = SlotAlign < RegAlign ? 
Hexagon::PS_vstorerwu_ai - : Hexagon::PS_vstorerw_ai; - MachineMemOperand *MMOA = MF.getMachineMemOperand( - MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, - MFI.getObjectSize(FI), SlotAlign); - BuildMI(MBB, I, DL, get(Opc)) + BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerw_ai)) .addFrameIndex(FI).addImm(0) - .addReg(SrcReg, KillFlag).addMemOperand(MMOA); + .addReg(SrcReg, KillFlag).addMemOperand(MMO); } else { llvm_unreachable("Unimplemented"); } @@ -954,9 +935,6 @@ void HexagonInstrInfo::loadRegFromStackSlot( MachineFunction &MF = *MBB.getParent(); MachineFrameInfo &MFI = MF.getFrameInfo(); unsigned SlotAlign = MFI.getObjectAlignment(FI); - unsigned RegAlign = TRI->getSpillAlignment(*RC); - bool HasAlloca = MFI.hasVarSizedObjects(); - const HexagonFrameLowering &HFI = *Subtarget.getFrameLowering(); MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, @@ -978,27 +956,11 @@ void HexagonInstrInfo::loadRegFromStackSlot( BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) { - // If there are variable-sized objects, spills will not be aligned. - if (HasAlloca) - SlotAlign = HFI.getStackAlignment(); - unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vL32Ub_ai - : Hexagon::V6_vL32b_ai; - MachineMemOperand *MMOA = MF.getMachineMemOperand( - MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), SlotAlign); - BuildMI(MBB, I, DL, get(Opc), DestReg) - .addFrameIndex(FI).addImm(0).addMemOperand(MMOA); + BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrv_ai), DestReg) + .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) { - // If there are variable-sized objects, spills will not be aligned. - if (HasAlloca) - SlotAlign = HFI.getStackAlignment(); - unsigned Opc = SlotAlign < RegAlign ? 
Hexagon::PS_vloadrwu_ai - : Hexagon::PS_vloadrw_ai; - MachineMemOperand *MMOA = MF.getMachineMemOperand( - MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, - MFI.getObjectSize(FI), SlotAlign); - BuildMI(MBB, I, DL, get(Opc), DestReg) - .addFrameIndex(FI).addImm(0).addMemOperand(MMOA); + BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrw_ai), DestReg) + .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else { llvm_unreachable("Can't store this register to stack slot"); } @@ -1040,6 +1002,15 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { return true; }; + auto UseAligned = [&] (const MachineInstr &MI, unsigned NeedAlign) { + if (MI.memoperands().empty()) + return false; + return all_of(MI.memoperands(), + [NeedAlign] (const MachineMemOperand *MMO) { + return NeedAlign <= MMO->getAlignment(); + }); + }; + switch (Opc) { case TargetOpcode::COPY: { MachineOperand &MD = MI.getOperand(0); @@ -1086,47 +1057,78 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { MRI.clearKillFlags(SrcSubHi); return true; } - case Hexagon::PS_vstorerw_ai: - case Hexagon::PS_vstorerwu_ai: { - bool Aligned = Opc == Hexagon::PS_vstorerw_ai; - Register SrcReg = MI.getOperand(2).getReg(); - Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi); - Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo); - unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; - unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass); - - MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc)) - .add(MI.getOperand(0)) - .addImm(MI.getOperand(1).getImm()) - .addReg(SrcSubLo) - .cloneMemRefs(MI); - MI1New->getOperand(0).setIsKill(false); - BuildMI(MBB, MI, DL, get(NewOpc)) - .add(MI.getOperand(0)) - // The Vectors are indexed in multiples of vector size. 
- .addImm(MI.getOperand(1).getImm() + Offset) - .addReg(SrcSubHi) + case Hexagon::PS_vloadrv_ai: { + Register DstReg = MI.getOperand(0).getReg(); + const MachineOperand &BaseOp = MI.getOperand(1); + assert(BaseOp.getSubReg() == 0); + int Offset = MI.getOperand(2).getImm(); + unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); + unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai + : Hexagon::V6_vL32Ub_ai; + BuildMI(MBB, MI, DL, get(NewOpc), DstReg) + .addReg(BaseOp.getReg(), getRegState(BaseOp)) + .addImm(Offset) .cloneMemRefs(MI); MBB.erase(MI); return true; } - case Hexagon::PS_vloadrw_ai: - case Hexagon::PS_vloadrwu_ai: { - bool Aligned = Opc == Hexagon::PS_vloadrw_ai; + case Hexagon::PS_vloadrw_ai: { Register DstReg = MI.getOperand(0).getReg(); - unsigned NewOpc = Aligned ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai; - unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass); - - MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc), - HRI.getSubReg(DstReg, Hexagon::vsub_lo)) - .add(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()) - .cloneMemRefs(MI); - MI1New->getOperand(1).setIsKill(false); - BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi)) - .add(MI.getOperand(1)) - // The Vectors are indexed in multiples of vector size. - .addImm(MI.getOperand(2).getImm() + Offset) + const MachineOperand &BaseOp = MI.getOperand(1); + assert(BaseOp.getSubReg() == 0); + int Offset = MI.getOperand(2).getImm(); + unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass); + unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); + unsigned NewOpc = UseAligned(MI, NeedAlign) ? 
Hexagon::V6_vL32b_ai + : Hexagon::V6_vL32Ub_ai; + BuildMI(MBB, MI, DL, get(NewOpc), + HRI.getSubReg(DstReg, Hexagon::vsub_lo)) + .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill) + .addImm(Offset) + .cloneMemRefs(MI); + BuildMI(MBB, MI, DL, get(NewOpc), + HRI.getSubReg(DstReg, Hexagon::vsub_hi)) + .addReg(BaseOp.getReg(), getRegState(BaseOp)) + .addImm(Offset + VecOffset) + .cloneMemRefs(MI); + MBB.erase(MI); + return true; + } + case Hexagon::PS_vstorerv_ai: { + const MachineOperand &SrcOp = MI.getOperand(2); + assert(SrcOp.getSubReg() == 0); + const MachineOperand &BaseOp = MI.getOperand(0); + assert(BaseOp.getSubReg() == 0); + int Offset = MI.getOperand(1).getImm(); + unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); + unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai + : Hexagon::V6_vS32Ub_ai; + BuildMI(MBB, MI, DL, get(NewOpc)) + .addReg(BaseOp.getReg(), getRegState(BaseOp)) + .addImm(Offset) + .addReg(SrcOp.getReg(), getRegState(SrcOp)) + .cloneMemRefs(MI); + MBB.erase(MI); + return true; + } + case Hexagon::PS_vstorerw_ai: { + Register SrcReg = MI.getOperand(2).getReg(); + const MachineOperand &BaseOp = MI.getOperand(0); + assert(BaseOp.getSubReg() == 0); + int Offset = MI.getOperand(1).getImm(); + unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass); + unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass); + unsigned NewOpc = UseAligned(MI, NeedAlign) ? 
Hexagon::V6_vS32b_ai + : Hexagon::V6_vS32Ub_ai; + BuildMI(MBB, MI, DL, get(NewOpc)) + .addReg(BaseOp.getReg(), getRegState(BaseOp) & ~RegState::Kill) + .addImm(Offset) + .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo)) + .cloneMemRefs(MI); + BuildMI(MBB, MI, DL, get(NewOpc)) + .addReg(BaseOp.getReg(), getRegState(BaseOp)) + .addImm(Offset + VecOffset) + .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi)) .cloneMemRefs(MI); MBB.erase(MI); return true; @@ -2145,7 +2147,7 @@ bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa, } bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const { - if (MI.mayLoad() || MI.mayStore() || MI.isCompare()) + if (MI.mayLoadOrStore() || MI.isCompare()) return true; // Multiply @@ -2683,9 +2685,11 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset, // misaligns with respect to load size. switch (Opcode) { case Hexagon::PS_vstorerq_ai: + case Hexagon::PS_vstorerv_ai: case Hexagon::PS_vstorerw_ai: case Hexagon::PS_vstorerw_nt_ai: case Hexagon::PS_vloadrq_ai: + case Hexagon::PS_vloadrv_ai: case Hexagon::PS_vloadrw_ai: case Hexagon::PS_vloadrw_nt_ai: case Hexagon::V6_vL32b_ai: @@ -2941,10 +2945,7 @@ bool HexagonInstrInfo::getMemOperandWithOffset( const TargetRegisterInfo *TRI) const { unsigned AccessSize = 0; BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize); - assert((!BaseOp || BaseOp->isReg()) && - "getMemOperandWithOffset only supports base " - "operands of type register."); - return BaseOp != nullptr; + return BaseOp != nullptr && BaseOp->isReg(); } /// Can these instructions execute at the same time in a bundle. diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h index 60298cd666bb..676f6f0a2a8c 100644 --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h @@ -173,7 +173,7 @@ public: /// careful implementation when multiple copy instructions are required for /// large registers. 
See for example the ARM target. void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, - const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, + const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override; /// Store the specified register of the given register class to the specified diff --git a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td index c5e3cfd080d6..8ae55b207188 100644 --- a/llvm/lib/Target/Hexagon/HexagonIntrinsics.td +++ b/llvm/lib/Target/Hexagon/HexagonIntrinsics.td @@ -353,9 +353,6 @@ def: Pat<(v64i16 (trunc v64i32:$Vdd)), (v32i32 (V6_lo HvxWR:$Vdd))))>, Requires<[UseHVX]>; -def: Pat<(int_hexagon_S2_asr_i_vh DoubleRegs:$src1, IntRegs:$src2), - (S2_asr_r_vh DoubleRegs:$src1, IntRegs:$src2)>, Requires<[HasV55]>; - multiclass T_VI_pat <InstHexagon MI, Intrinsic IntID> { def: Pat<(IntID HvxVR:$src1, u3_0ImmPred:$src2), (MI HvxVR:$src1, HvxVR:$src1, u3_0ImmPred:$src2)>, diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp index bda3eccac0cd..ffaf71e23690 100644 --- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -6,8 +6,6 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "hexagon-lir" - #include "llvm/ADT/APInt.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SetVector.h" @@ -25,7 +23,6 @@ #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/Transforms/Utils/Local.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" @@ -42,11 +39,13 @@ #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include 
"llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" @@ -57,6 +56,7 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils.h" +#include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <array> #include <cassert> @@ -70,6 +70,8 @@ #include <utility> #include <vector> +#define DEBUG_TYPE "hexagon-lir" + using namespace llvm; static cl::opt<bool> DisableMemcpyIdiom("disable-memcpy-idiom", @@ -2273,14 +2275,12 @@ CleanupAndExit: : CondBuilder.CreateBitCast(LoadBasePtr, Int32PtrTy); NewCall = CondBuilder.CreateCall(Fn, {Op0, Op1, NumWords}); } else { - NewCall = CondBuilder.CreateMemMove(StoreBasePtr, SI->getAlignment(), - LoadBasePtr, LI->getAlignment(), - NumBytes); + NewCall = CondBuilder.CreateMemMove( + StoreBasePtr, SI->getAlign(), LoadBasePtr, LI->getAlign(), NumBytes); } } else { - NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(), - LoadBasePtr, LI->getAlignment(), - NumBytes); + NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr, + LI->getAlign(), NumBytes); // Okay, the memcpy has been formed. Zap the original store and // anything that feeds into it. 
RecursivelyDeleteTriviallyDeadInstructions(SI, TLI); @@ -2335,7 +2335,7 @@ bool HexagonLoopIdiomRecognize::coverLoop(Loop *L, continue; if (!Worklist.count(&In) && In.mayHaveSideEffects()) return false; - for (const auto &K : In.users()) { + for (auto K : In.users()) { Instruction *UseI = dyn_cast<Instruction>(K); if (!UseI) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp index 680d01e12af0..e3579dfa9ba9 100644 --- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -20,6 +20,7 @@ // //===----------------------------------------------------------------------===// +#include "llvm/InitializePasses.h" #include "Hexagon.h" #include "HexagonInstrInfo.h" #include "HexagonRegisterInfo.h" diff --git a/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp index 9121115020a2..886034d9601a 100644 --- a/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/llvm/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -28,6 +28,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/InitializePasses.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" diff --git a/llvm/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp b/llvm/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp index d00fc23102a5..d818e0897f75 100644 --- a/llvm/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp +++ b/llvm/lib/Target/Hexagon/HexagonOptimizeSZextends.cpp @@ -15,6 +15,7 @@ #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td index 485e658e1c84..cf711058823c 100644 --- 
a/llvm/lib/Target/Hexagon/HexagonPatterns.td +++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td @@ -1817,9 +1817,9 @@ def: Pat<(i1 (seteq (and I64:$Rs, IsPow2_64L:$u6), 0)), def: Pat<(i1 (seteq (and I64:$Rs, IsPow2_64H:$u6), 0)), (S4_ntstbit_i (HiReg $Rs), (UDEC32 (i32 (Log2_64 $u6))))>; def: Pat<(i1 (setne (and I64:$Rs, IsPow2_64L:$u6), 0)), - (S2_tstbit_i (LoReg $Rs), (Log2_32 imm:$u6))>; + (S2_tstbit_i (LoReg $Rs), (Log2_64 imm:$u6))>; def: Pat<(i1 (setne (and I64:$Rs, IsPow2_64H:$u6), 0)), - (S2_tstbit_i (HiReg $Rs), (UDEC32 (i32 (Log2_32 imm:$u6))))>; + (S2_tstbit_i (HiReg $Rs), (UDEC32 (i32 (Log2_64 imm:$u6))))>; // Do not increase complexity of these patterns. In the DAG, "cmp i8" may be // represented as a compare against "value & 0xFF", which is an exact match diff --git a/llvm/lib/Target/Hexagon/HexagonPseudo.td b/llvm/lib/Target/Hexagon/HexagonPseudo.td index 7dd25d7d93d5..d2b6d64e3c92 100644 --- a/llvm/lib/Target/Hexagon/HexagonPseudo.td +++ b/llvm/lib/Target/Hexagon/HexagonPseudo.td @@ -408,15 +408,17 @@ let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in { // Vector store pseudos let Predicates = [HasV60,UseHVX], isPseudo = 1, isCodeGenOnly = 1, mayStore = 1, accessSize = HVXVectorAccess, hasSideEffects = 0 in -class STrivv_template<RegisterClass RC, InstHexagon rootInst> +class STriv_template<RegisterClass RC, InstHexagon rootInst> : InstHexagon<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src), "", [], "", rootInst.Itinerary, rootInst.Type>; -def PS_vstorerw_ai: STrivv_template<HvxWR, V6_vS32b_ai>, +def PS_vstorerv_ai: STriv_template<HvxVR, V6_vS32b_ai>, Requires<[HasV60,UseHVX]>; -def PS_vstorerw_nt_ai: STrivv_template<HvxWR, V6_vS32b_nt_ai>, +def PS_vstorerv_nt_ai: STriv_template<HvxVR, V6_vS32b_nt_ai>, Requires<[HasV60,UseHVX]>; -def PS_vstorerwu_ai: STrivv_template<HvxWR, V6_vS32Ub_ai>, +def PS_vstorerw_ai: STriv_template<HvxWR, V6_vS32b_ai>, + Requires<[HasV60,UseHVX]>; +def PS_vstorerw_nt_ai: STriv_template<HvxWR, V6_vS32b_nt_ai>, 
Requires<[HasV60,UseHVX]>; let isPseudo = 1, isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0 in @@ -427,15 +429,17 @@ def PS_vstorerq_ai: Pseudo<(outs), // Vector load pseudos let Predicates = [HasV60, UseHVX], isPseudo = 1, isCodeGenOnly = 1, mayLoad = 1, accessSize = HVXVectorAccess, hasSideEffects = 0 in -class LDrivv_template<RegisterClass RC, InstHexagon rootInst> +class LDriv_template<RegisterClass RC, InstHexagon rootInst> : InstHexagon<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off), "", [], "", rootInst.Itinerary, rootInst.Type>; -def PS_vloadrw_ai: LDrivv_template<HvxWR, V6_vL32b_ai>, +def PS_vloadrv_ai: LDriv_template<HvxVR, V6_vL32b_ai>, + Requires<[HasV60,UseHVX]>; +def PS_vloadrv_nt_ai: LDriv_template<HvxVR, V6_vL32b_nt_ai>, Requires<[HasV60,UseHVX]>; -def PS_vloadrw_nt_ai: LDrivv_template<HvxWR, V6_vL32b_nt_ai>, +def PS_vloadrw_ai: LDriv_template<HvxWR, V6_vL32b_ai>, Requires<[HasV60,UseHVX]>; -def PS_vloadrwu_ai: LDrivv_template<HvxWR, V6_vL32Ub_ai>, +def PS_vloadrw_nt_ai: LDriv_template<HvxWR, V6_vL32b_nt_ai>, Requires<[HasV60,UseHVX]>; let isPseudo = 1, isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0 in diff --git a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp index 910a17540e6e..517ad1c6ee7b 100644 --- a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp +++ b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp @@ -24,6 +24,7 @@ #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp index b7171fb14272..d55aeaf10852 100644 --- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -73,6 +73,9 @@ HexagonRegisterInfo::getCallerSavedRegs(const MachineFunction 
*MF, static const MCPhysReg VecDbl[] = { W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, 0 }; + static const MCPhysReg VecPred[] = { + Q0, Q1, Q2, Q3, 0 + }; switch (RC->getID()) { case IntRegsRegClassID: @@ -85,6 +88,8 @@ HexagonRegisterInfo::getCallerSavedRegs(const MachineFunction *MF, return VecSgl; case HvxWRRegClassID: return VecDbl; + case HvxQRRegClassID: + return VecPred; default: break; } diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td index f12189052699..c23b837bb62f 100644 --- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td +++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td @@ -119,7 +119,7 @@ let Namespace = "Hexagon" in { def P2 : Rp<2, "p2">, DwarfRegNum<[65]>; def P3 : Rp<3, "p3">, DwarfRegNum<[66]>; - // Fake register to represent USR.OVF bit. Artihmetic/saturating instruc- + // Fake register to represent USR.OVF bit. Arithmetic/saturating instruc- // tions modify this bit, and multiple such instructions are allowed in the // same packet. We need to ignore output dependencies on this bit, but not // on the entire USR. 
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp index 55f31c628854..d80e0ed50c93 100644 --- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -159,7 +159,7 @@ bool HexagonSplitDoubleRegs::isVolatileInstr(const MachineInstr *MI) const { } bool HexagonSplitDoubleRegs::isFixedInstr(const MachineInstr *MI) const { - if (MI->mayLoad() || MI->mayStore()) + if (MI->mayLoadOrStore()) if (MemRefsFixed || isVolatileInstr(MI)) return true; if (MI->isDebugInstr()) diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp index 27fefa5f5e2b..aab37393ed36 100644 --- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp +++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp @@ -20,8 +20,6 @@ // per packet, it also means fewer packets, and ultimately fewer cycles. //===---------------------------------------------------------------------===// -#define DEBUG_TYPE "hexagon-widen-stores" - #include "HexagonInstrInfo.h" #include "HexagonRegisterInfo.h" #include "HexagonSubtarget.h" @@ -37,6 +35,7 @@ #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" @@ -49,6 +48,8 @@ #include <iterator> #include <vector> +#define DEBUG_TYPE "hexagon-widen-stores" + using namespace llvm; namespace llvm { @@ -270,7 +271,7 @@ void HexagonStoreWidening::createStoreGroup(MachineInstr *BaseStore, if (MI->isCall() || MI->hasUnmodeledSideEffects()) return; - if (MI->mayLoad() || MI->mayStore()) { + if (MI->mayLoadOrStore()) { if (MI->hasOrderedMemoryRef() || instrAliased(Group, MI)) return; Other.push_back(MI); diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp index d709a82be660..9e9ce209a825 
100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -180,7 +180,7 @@ static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) { return *RM; } -extern "C" void LLVMInitializeHexagonTarget() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonTarget() { // Register the target. RegisterTargetMachine<HexagonTargetMachine> X(getTheHexagonTarget()); diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp index ddbc5543348d..4d4627cd2071 100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp @@ -152,7 +152,9 @@ unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp, } unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, - unsigned Alignment, unsigned AddressSpace, const Instruction *I) { + MaybeAlign Alignment, + unsigned AddressSpace, + const Instruction *I) { assert(Opcode == Instruction::Load || Opcode == Instruction::Store); if (Opcode == Instruction::Store) return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I); @@ -166,24 +168,30 @@ unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, // Cost of HVX loads. if (VecWidth % RegWidth == 0) return VecWidth / RegWidth; - // Cost of constructing HVX vector from scalar loads. - Alignment = std::min(Alignment, RegWidth / 8); - unsigned AlignWidth = 8 * std::max(1u, Alignment); + // Cost of constructing HVX vector from scalar loads + const Align RegAlign(RegWidth / 8); + if (!Alignment || *Alignment > RegAlign) + Alignment = RegAlign; + assert(Alignment); + unsigned AlignWidth = 8 * Alignment->value(); unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth; return 3 * NumLoads; } // Non-HVX vectors. // Add extra cost for floating point types. - unsigned Cost = VecTy->getElementType()->isFloatingPointTy() ? 
FloatFactor - : 1; - Alignment = std::min(Alignment, 8u); - unsigned AlignWidth = 8 * std::max(1u, Alignment); + unsigned Cost = + VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1; + + // At this point unspecified alignment is considered as Align::None(). + const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8)); + unsigned AlignWidth = 8 * BoundAlignment.value(); unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth; - if (Alignment == 4 || Alignment == 8) + if (Alignment == Align(4) || Alignment == Align(8)) return Cost * NumLoads; // Loads of less than 32 bits will need extra inserts to compose a vector. - unsigned LogA = Log2_32(Alignment); + assert(BoundAlignment <= Align(8)); + unsigned LogA = Log2(BoundAlignment); return (3 - LogA) * Cost * NumLoads; } @@ -214,7 +222,8 @@ unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, UseMaskForCond, UseMaskForGaps); - return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, nullptr); + return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, + nullptr); } unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, @@ -227,17 +236,18 @@ unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I); } -unsigned HexagonTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty, - TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info, - TTI::OperandValueProperties Opd1PropInfo, - TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value*> Args) { +unsigned HexagonTTIImpl::getArithmeticInstrCost( + unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info, + TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo, + TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args, + const Instruction *CxtI) { if (Ty->isVectorTy()) 
{ std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty); if (LT.second.isFloatingPoint()) return LT.first + FloatFactor * getTypeNumElements(Ty); } return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info, - Opd1PropInfo, Opd2PropInfo, Args); + Opd1PropInfo, Opd2PropInfo, Args, CxtI); } unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy, diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h index 12ede503af83..ace0d797bbdb 100644 --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h @@ -112,8 +112,9 @@ public: unsigned ScalarizationCostPassed = UINT_MAX); unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *SE, const SCEV *S); - unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, - unsigned AddressSpace, const Instruction *I = nullptr); + unsigned getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment, + unsigned AddressSpace, + const Instruction *I = nullptr); unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, unsigned AddressSpace); unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, @@ -126,12 +127,14 @@ public: bool UseMaskForGaps = false); unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, const Instruction *I); - unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, - TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, - TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, - TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, - TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, - ArrayRef<const Value *> Args = ArrayRef<const Value *>()); + unsigned getArithmeticInstrCost( + unsigned Opcode, Type *Ty, + TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, + TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, + TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, + 
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, + ArrayRef<const Value *> Args = ArrayRef<const Value *>(), + const Instruction *CxtI = nullptr); unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, const Instruction *I = nullptr); unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index); diff --git a/llvm/lib/Target/Hexagon/HexagonVExtract.cpp b/llvm/lib/Target/Hexagon/HexagonVExtract.cpp index 0c0266a6839a..b7d6dbe21c74 100644 --- a/llvm/lib/Target/Hexagon/HexagonVExtract.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVExtract.cpp @@ -11,6 +11,7 @@ #include "Hexagon.h" #include "HexagonInstrInfo.h" +#include "HexagonMachineFunctionInfo.h" #include "HexagonRegisterInfo.h" #include "HexagonSubtarget.h" #include "llvm/ADT/SmallVector.h" @@ -103,7 +104,10 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { const auto &HRI = *HST->getRegisterInfo(); MachineRegisterInfo &MRI = MF.getRegInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); + Register AR = + MF.getInfo<HexagonMachineFunctionInfo>()->getStackAlignBaseVReg(); std::map<unsigned, SmallVector<MachineInstr*,4>> VExtractMap; + unsigned MaxAlign = 0; bool Changed = false; for (MachineBasicBlock &MBB : MF) { @@ -116,22 +120,41 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { } } + auto EmitAddr = [&] (MachineBasicBlock &BB, MachineBasicBlock::iterator At, + DebugLoc dl, int FI, unsigned Offset) { + Register AddrR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); + unsigned FiOpc = AR != 0 ? 
Hexagon::PS_fia : Hexagon::PS_fi; + auto MIB = BuildMI(BB, At, dl, HII->get(FiOpc), AddrR); + if (AR) + MIB.addReg(AR); + MIB.addFrameIndex(FI).addImm(Offset); + return AddrR; + }; + for (auto &P : VExtractMap) { unsigned VecR = P.first; if (P.second.size() <= VExtractThreshold) continue; const auto &VecRC = *MRI.getRegClass(VecR); - int FI = MFI.CreateSpillStackObject(HRI.getSpillSize(VecRC), - HRI.getSpillAlignment(VecRC)); + unsigned Align = HRI.getSpillAlignment(VecRC); + MaxAlign = std::max(MaxAlign, Align); + // Make sure this is not a spill slot: spill slots cannot be aligned + // if there are variable-sized objects on the stack. They must be + // accessible via FP (which is not aligned), because SP is unknown, + // and AP may not be available at the location of the load/store. + int FI = MFI.CreateStackObject(HRI.getSpillSize(VecRC), Align, + /*isSpillSlot*/false); + MachineInstr *DefI = MRI.getVRegDef(VecR); MachineBasicBlock::iterator At = std::next(DefI->getIterator()); MachineBasicBlock &DefB = *DefI->getParent(); unsigned StoreOpc = VecRC.getID() == Hexagon::HvxVRRegClassID ? Hexagon::V6_vS32b_ai : Hexagon::PS_vstorerw_ai; + Register AddrR = EmitAddr(DefB, At, DefI->getDebugLoc(), FI, 0); BuildMI(DefB, At, DefI->getDebugLoc(), HII->get(StoreOpc)) - .addFrameIndex(FI) + .addReg(AddrR) .addImm(0) .addReg(VecR); @@ -144,10 +167,8 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { MachineBasicBlock &ExtB = *ExtI->getParent(); DebugLoc DL = ExtI->getDebugLoc(); - Register BaseR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - BuildMI(ExtB, ExtI, DL, HII->get(Hexagon::PS_fi), BaseR) - .addFrameIndex(FI) - .addImm(SR == 0 ? 0 : VecSize/2); + Register BaseR = EmitAddr(ExtB, ExtI, ExtI->getDebugLoc(), FI, + SR == 0 ? 
0 : VecSize/2); unsigned ElemR = genElemLoad(ExtI, BaseR, MRI); Register ExtR = ExtI->getOperand(0).getReg(); @@ -157,6 +178,15 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) { } } + if (AR) { + // Update the required stack alignment. + MachineInstr *AlignaI = MRI.getVRegDef(AR); + assert(AlignaI->getOpcode() == Hexagon::PS_aligna); + MachineOperand &Op = AlignaI->getOperand(1); + if (MaxAlign > Op.getImm()) + Op.setImm(MaxAlign); + } + return Changed; } diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index fab5edefb553..36d71c41da54 100644 --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -40,6 +40,7 @@ #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" @@ -111,9 +112,9 @@ namespace { } private: - const HexagonInstrInfo *HII; - const HexagonRegisterInfo *HRI; - const bool Minimal; + const HexagonInstrInfo *HII = nullptr; + const HexagonRegisterInfo *HRI = nullptr; + const bool Minimal = false; }; } // end anonymous namespace @@ -308,7 +309,7 @@ bool HexagonPacketizerList::isCallDependent(const MachineInstr &MI, // r0 = ... 
// J2_jumpr r0 if (DepType == SDep::Data) { - for (const MachineOperand MO : MI.operands()) + for (const MachineOperand &MO : MI.operands()) if (MO.isReg() && MO.getReg() == DepReg && !MO.isImplicit()) return true; } diff --git a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp index e5df1d456c1e..42451e02ba36 100644 --- a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp @@ -127,9 +127,11 @@ #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsHexagon.h" #include "llvm/IR/Use.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" +#include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" @@ -235,7 +237,7 @@ namespace { Instruction *Inst2Replace = nullptr; // In the new PHI node that we'll construct this is the value that'll be - // used over the backedge. This is teh value that gets reused from a + // used over the backedge. This is the value that gets reused from a // previous iteration. 
Instruction *BackedgeInst = nullptr; std::map<Instruction *, DepChain *> DepChains; diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp index 75cb398d4097..8f1e5c1c3a97 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp @@ -60,7 +60,7 @@ class HexagonAsmBackend : public MCAsmBackend { public: HexagonAsmBackend(const Target &T, const Triple &TT, uint8_t OSABI, StringRef CPU) - : MCAsmBackend(support::little), OSABI(OSABI), CPU(CPU), + : MCAsmBackend(support::little), OSABI(OSABI), CPU(CPU), relaxedCnt(0), MCII(T.createMCInstrInfo()), RelaxTarget(new MCInst *), Extender(nullptr) {} diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp index 6b9e63f5ac9e..698dcbd4b8b1 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.cpp @@ -30,8 +30,9 @@ void HexagonInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const { O << getRegisterName(RegNo); } -void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, - StringRef Annot, const MCSubtargetInfo &STI) { +void HexagonInstPrinter::printInst(const MCInst *MI, uint64_t Address, + StringRef Annot, const MCSubtargetInfo &STI, + raw_ostream &OS) { assert(HexagonMCInstrInfo::isBundle(*MI)); assert(HexagonMCInstrInfo::bundleSize(*MI) <= HEXAGON_PACKET_SIZE); assert(HexagonMCInstrInfo::bundleSize(*MI) > 0); @@ -39,12 +40,12 @@ void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, for (auto const &I : HexagonMCInstrInfo::bundleInstructions(*MI)) { MCInst const &MCI = *I.getInst(); if (HexagonMCInstrInfo::isDuplex(MII, MCI)) { - printInstruction(MCI.getOperand(1).getInst(), OS); + printInstruction(MCI.getOperand(1).getInst(), Address, OS); OS << '\v'; HasExtender = false; - 
printInstruction(MCI.getOperand(0).getInst(), OS); + printInstruction(MCI.getOperand(0).getInst(), Address, OS); } else - printInstruction(&MCI, OS); + printInstruction(&MCI, Address, OS); HasExtender = HexagonMCInstrInfo::isImmext(MCI); OS << "\n"; } diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h index ca32c3c1f50f..cd96a23e1b94 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonInstPrinter.h @@ -28,13 +28,13 @@ public: MCRegisterInfo const &MRI) : MCInstPrinter(MAI, MII, MRI), MII(MII) {} - void printInst(MCInst const *MI, raw_ostream &O, StringRef Annot, - const MCSubtargetInfo &STI) override; + void printInst(MCInst const *MI, uint64_t Address, StringRef Annot, + const MCSubtargetInfo &STI, raw_ostream &O) override; void printRegName(raw_ostream &O, unsigned RegNo) const override; static char const *getRegisterName(unsigned RegNo); - void printInstruction(MCInst const *MI, raw_ostream &O); + void printInstruction(const MCInst *MI, uint64_t Address, raw_ostream &O); void printOperand(MCInst const *MI, unsigned OpNo, raw_ostream &O) const; void printBrtarget(MCInst const *MI, unsigned OpNo, raw_ostream &O) const; diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp index ed571188c1e8..2b0bbdafa381 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp @@ -92,7 +92,7 @@ static unsigned getCompoundCandidateGroup(MCInst const &MI, bool IsExtended) { case Hexagon::C2_cmpgt: case Hexagon::C2_cmpgtu: if (IsExtended) - return false; + return HexagonII::HCG_None; DstReg = MI.getOperand(0).getReg(); Src1Reg = MI.getOperand(1).getReg(); Src2Reg = MI.getOperand(2).getReg(); @@ -105,7 +105,7 @@ static unsigned getCompoundCandidateGroup(MCInst const &MI, bool 
IsExtended) { case Hexagon::C2_cmpgti: case Hexagon::C2_cmpgtui: if (IsExtended) - return false; + return HexagonII::HCG_None; // P0 = cmp.eq(Rs,#u2) DstReg = MI.getOperand(0).getReg(); SrcReg = MI.getOperand(1).getReg(); @@ -117,7 +117,7 @@ static unsigned getCompoundCandidateGroup(MCInst const &MI, bool IsExtended) { break; case Hexagon::A2_tfr: if (IsExtended) - return false; + return HexagonII::HCG_None; // Rd = Rs DstReg = MI.getOperand(0).getReg(); SrcReg = MI.getOperand(1).getReg(); @@ -127,7 +127,7 @@ static unsigned getCompoundCandidateGroup(MCInst const &MI, bool IsExtended) { break; case Hexagon::A2_tfrsi: if (IsExtended) - return false; + return HexagonII::HCG_None; // Rd = #u6 DstReg = MI.getOperand(0).getReg(); if (HexagonMCInstrInfo::minConstant(MI, 1) <= 63 && @@ -137,7 +137,7 @@ static unsigned getCompoundCandidateGroup(MCInst const &MI, bool IsExtended) { break; case Hexagon::S2_tstbit_i: if (IsExtended) - return false; + return HexagonII::HCG_None; DstReg = MI.getOperand(0).getReg(); Src1Reg = MI.getOperand(1).getReg(); if ((Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) && diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp index 870ab9e94a63..f8dc0547baad 100644 --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp @@ -137,14 +137,15 @@ public: MCInstPrinter &IP) : HexagonTargetStreamer(S) {} - void prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS, - const MCInst &Inst, const MCSubtargetInfo &STI) override { + void prettyPrintAsm(MCInstPrinter &InstPrinter, uint64_t Address, + const MCInst &Inst, const MCSubtargetInfo &STI, + raw_ostream &OS) override { assert(HexagonMCInstrInfo::isBundle(Inst)); assert(HexagonMCInstrInfo::bundleSize(Inst) <= HEXAGON_PACKET_SIZE); std::string Buffer; { raw_string_ostream TempStream(Buffer); - InstPrinter.printInst(&Inst, TempStream, "", 
STI); + InstPrinter.printInst(&Inst, Address, "", STI, TempStream); } StringRef Contents(Buffer); auto PacketBundle = Contents.rsplit('\n'); @@ -219,7 +220,8 @@ static MCRegisterInfo *createHexagonMCRegisterInfo(const Triple &TT) { } static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI, - const Triple &TT) { + const Triple &TT, + const MCTargetOptions &Options) { MCAsmInfo *MAI = new HexagonMCAsmInfo(TT); // VirtualFP = (R30 + #0). @@ -454,7 +456,7 @@ static MCInstrAnalysis *createHexagonMCInstrAnalysis(const MCInstrInfo *Info) { } // Force static initialization. -extern "C" void LLVMInitializeHexagonTargetMC() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonTargetMC() { // Register the MC asm info. RegisterMCAsmInfoFn X(getTheHexagonTarget(), createHexagonMCAsmInfo); diff --git a/llvm/lib/Target/Hexagon/RDFLiveness.cpp b/llvm/lib/Target/Hexagon/RDFLiveness.cpp index 7d7b89462ff9..e2c007c9d01a 100644 --- a/llvm/lib/Target/Hexagon/RDFLiveness.cpp +++ b/llvm/lib/Target/Hexagon/RDFLiveness.cpp @@ -620,7 +620,7 @@ void Liveness::computePhiInfo() { for (NodeAddr<UseNode*> UA : PUs) { std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id]; RegisterRef UR = PRI.normalize(UA.Addr->getRegRef(DFG)); - for (const std::pair<NodeId,RegisterAggr> &P : PUM) { + for (const std::pair<const NodeId, RegisterAggr> &P : PUM) { bool Changed = false; const RegisterAggr &MidDefs = P.second; @@ -636,7 +636,7 @@ void Liveness::computePhiInfo() { // if MidDefs does not cover (R,U) // then add (R-MidDefs,U) to RealUseMap[P] // - for (const std::pair<RegisterId,NodeRefSet> &T : RUM) { + for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) { RegisterRef R(T.first); // The current phi (PA) could be a phi for a regmask. 
It could // reach a whole variety of uses that are not related to the @@ -768,7 +768,7 @@ void Liveness::computeLiveIns() { auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor()); RefMap &LOX = PhiLOX[PrA.Addr->getCode()]; - for (const std::pair<RegisterId,NodeRefSet> &RS : RUs) { + for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) { // We need to visit each individual use. for (std::pair<NodeId,LaneBitmask> P : RS.second) { // Create a register ref corresponding to the use, and find @@ -991,7 +991,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) { RefMap LiveInCopy = LiveIn; LiveIn.clear(); - for (const std::pair<RegisterId,NodeRefSet> &LE : LiveInCopy) { + for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) { RegisterRef LRef(LE.first); NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled. const NodeRefSet &OldDefs = LE.second; @@ -1105,7 +1105,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) { for (auto C : IIDF[B]) { RegisterAggr &LiveC = LiveMap[C]; - for (const std::pair<RegisterId,NodeRefSet> &S : LiveIn) + for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn) for (auto R : S.second) if (MDT.properlyDominates(getBlockWithRef(R.first), C)) LiveC.insert(RegisterRef(S.first, R.second)); diff --git a/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp b/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp index d77b235d0077..48770be3e301 100644 --- a/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp +++ b/llvm/lib/Target/Hexagon/TargetInfo/HexagonTargetInfo.cpp @@ -15,7 +15,7 @@ Target &llvm::getTheHexagonTarget() { return TheHexagonTarget; } -extern "C" void LLVMInitializeHexagonTargetInfo() { +extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonTargetInfo() { RegisterTarget<Triple::hexagon, /*HasJIT=*/true> X( getTheHexagonTarget(), "hexagon", "Hexagon", "Hexagon"); } |