| author | Dimitry Andric <dim@FreeBSD.org> | 2019-10-23 17:51:42 +0000 |
|---|---|---|
| committer | Dimitry Andric <dim@FreeBSD.org> | 2019-10-23 17:51:42 +0000 |
| commit | 1d5ae1026e831016fc29fd927877c86af904481f (patch) | |
| tree | 2cdfd12620fcfa5d9e4a0389f85368e8e36f63f9 | /lib/Target/SystemZ |
| parent | e6d1592492a3a379186bfb02bd0f4eda0669c0d5 (diff) | |
Vendor import of stripped llvm trunk r375505, the last commit before the
upstream Subversion repository was made read-only, and the LLVM project
migrated to GitHub:
https://llvm.org/svn/llvm-project/llvm/trunk@375505
Notes:
    svn path=/vendor/llvm/dist/; revision=353940
    svn path=/vendor/llvm/llvm-r375505/; revision=353941; tag=vendor/llvm/llvm-trunk-r375505
Diffstat (limited to 'lib/Target/SystemZ')
32 files changed, 679 insertions, 681 deletions
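Most of the churn in this import is mechanical: LLVM's local `llvm::make_unique` helper is retired in favour of C++14's `std::make_unique`, and plain `unsigned` register numbers are migrated to the `Register` wrapper class. The following is a minimal sketch of the `make_unique` pattern only, assuming a C++14 compiler; the `Widget` type and `createWidget` helper are stand-ins for illustration and are not part of the diff below:

```cpp
#include <memory>

struct Widget {
  int Kind;
  explicit Widget(int K) : Kind(K) {}
};

// Before this import the same factory would have spelled the call as
// llvm::make_unique<Widget>(Kind); the standard-library form is a drop-in
// replacement with identical behaviour.
static std::unique_ptr<Widget> createWidget(int Kind) {
  return std::make_unique<Widget>(Kind);
}

int main() {
  auto W = createWidget(42);
  return W->Kind == 42 ? 0 : 1;
}
```

Both renamings are behaviour-neutral; they change only the spelling of the same construction, which is why they account for so many one-line hunks in the diff that follows.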
| diff --git a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp index a259ba3433d6..93c4ce4b5ccc 100644 --- a/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp +++ b/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp @@ -155,11 +155,11 @@ public:    // Create particular kinds of operand.    static std::unique_ptr<SystemZOperand> createInvalid(SMLoc StartLoc,                                                         SMLoc EndLoc) { -    return make_unique<SystemZOperand>(KindInvalid, StartLoc, EndLoc); +    return std::make_unique<SystemZOperand>(KindInvalid, StartLoc, EndLoc);    }    static std::unique_ptr<SystemZOperand> createToken(StringRef Str, SMLoc Loc) { -    auto Op = make_unique<SystemZOperand>(KindToken, Loc, Loc); +    auto Op = std::make_unique<SystemZOperand>(KindToken, Loc, Loc);      Op->Token.Data = Str.data();      Op->Token.Length = Str.size();      return Op; @@ -167,7 +167,7 @@ public:    static std::unique_ptr<SystemZOperand>    createReg(RegisterKind Kind, unsigned Num, SMLoc StartLoc, SMLoc EndLoc) { -    auto Op = make_unique<SystemZOperand>(KindReg, StartLoc, EndLoc); +    auto Op = std::make_unique<SystemZOperand>(KindReg, StartLoc, EndLoc);      Op->Reg.Kind = Kind;      Op->Reg.Num = Num;      return Op; @@ -175,7 +175,7 @@ public:    static std::unique_ptr<SystemZOperand>    createImm(const MCExpr *Expr, SMLoc StartLoc, SMLoc EndLoc) { -    auto Op = make_unique<SystemZOperand>(KindImm, StartLoc, EndLoc); +    auto Op = std::make_unique<SystemZOperand>(KindImm, StartLoc, EndLoc);      Op->Imm = Expr;      return Op;    } @@ -184,7 +184,7 @@ public:    createMem(MemoryKind MemKind, RegisterKind RegKind, unsigned Base,              const MCExpr *Disp, unsigned Index, const MCExpr *LengthImm,              unsigned LengthReg, SMLoc StartLoc, SMLoc EndLoc) { -    auto Op = make_unique<SystemZOperand>(KindMem, StartLoc, EndLoc); +    auto Op = std::make_unique<SystemZOperand>(KindMem, StartLoc, EndLoc);      Op->Mem.MemKind = MemKind;      Op->Mem.RegKind = RegKind;      Op->Mem.Base = Base; @@ -200,7 +200,7 @@ public:    static std::unique_ptr<SystemZOperand>    createImmTLS(const MCExpr *Imm, const MCExpr *Sym,                 SMLoc StartLoc, SMLoc EndLoc) { -    auto Op = make_unique<SystemZOperand>(KindImmTLS, StartLoc, EndLoc); +    auto Op = std::make_unique<SystemZOperand>(KindImmTLS, StartLoc, EndLoc);      Op->ImmTLS.Imm = Imm;      Op->ImmTLS.Sym = Sym;      return Op; diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp index 8d8ba5644e10..49b6fc490336 100644 --- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp +++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp @@ -162,5 +162,5 @@ unsigned SystemZObjectWriter::getRelocType(MCContext &Ctx,  std::unique_ptr<MCObjectTargetWriter>  llvm::createSystemZObjectWriter(uint8_t OSABI) { -  return llvm::make_unique<SystemZObjectWriter>(OSABI); +  return std::make_unique<SystemZObjectWriter>(OSABI);  } diff --git a/lib/Target/SystemZ/SystemZ.h b/lib/Target/SystemZ/SystemZ.h index 2b0f90182d7f..88cf589a3f10 100644 --- a/lib/Target/SystemZ/SystemZ.h +++ b/lib/Target/SystemZ/SystemZ.h @@ -190,7 +190,6 @@ static inline bool isImmHF(uint64_t Val) {  FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,                                     CodeGenOpt::Level OptLevel);  FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM); -FunctionPass 
*createSystemZExpandPseudoPass(SystemZTargetMachine &TM);  FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);  FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);  FunctionPass *createSystemZLDCleanupPass(SystemZTargetMachine &TM); diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/lib/Target/SystemZ/SystemZAsmPrinter.cpp index ef378e4ade7a..10023e9e169c 100644 --- a/lib/Target/SystemZ/SystemZAsmPrinter.cpp +++ b/lib/Target/SystemZ/SystemZAsmPrinter.cpp @@ -501,6 +501,10 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {      }      break; +  case TargetOpcode::FENTRY_CALL: +    LowerFENTRY_CALL(*MI, Lower); +    return; +    case TargetOpcode::STACKMAP:      LowerSTACKMAP(*MI);      return; @@ -546,6 +550,22 @@ static unsigned EmitNop(MCContext &OutContext, MCStreamer &OutStreamer,    }  } +void SystemZAsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI, +                                         SystemZMCInstLower &Lower) { +  MCContext &Ctx = MF->getContext(); +  if (MF->getFunction().getFnAttribute("mnop-mcount") +                       .getValueAsString() == "true") { +    EmitNop(Ctx, *OutStreamer, 6, getSubtargetInfo()); +    return; +  } + +  MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__"); +  const MCSymbolRefExpr *Op = +      MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_PLT, Ctx); +  OutStreamer->EmitInstruction(MCInstBuilder(SystemZ::BRASL) +                       .addReg(SystemZ::R0D).addExpr(Op), getSubtargetInfo()); +} +  void SystemZAsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {    const SystemZInstrInfo *TII =      static_cast<const SystemZInstrInfo *>(MF->getSubtarget().getInstrInfo()); diff --git a/lib/Target/SystemZ/SystemZAsmPrinter.h b/lib/Target/SystemZ/SystemZAsmPrinter.h index aa5d3ca78e61..d01a17c2ebe2 100644 --- a/lib/Target/SystemZ/SystemZAsmPrinter.h +++ b/lib/Target/SystemZ/SystemZAsmPrinter.h @@ -46,6 +46,7 @@ public:    }  private: +  void LowerFENTRY_CALL(const MachineInstr &MI, SystemZMCInstLower &MCIL);    void LowerSTACKMAP(const MachineInstr &MI);    void LowerPATCHPOINT(const MachineInstr &MI, SystemZMCInstLower &Lower);  }; diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp index 9cbf6b320504..946eb2ba7c79 100644 --- a/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -152,7 +152,7 @@ Reference SystemZElimCompare::getRegReferences(MachineInstr &MI, unsigned Reg) {    for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {      const MachineOperand &MO = MI.getOperand(I);      if (MO.isReg()) { -      if (unsigned MOReg = MO.getReg()) { +      if (Register MOReg = MO.getReg()) {          if (TRI->regsOverlap(MOReg, Reg)) {            if (MO.isUse())              Ref.Use = true; @@ -378,11 +378,8 @@ bool SystemZElimCompare::adjustCCMasksForInstr(    }    // CC is now live after MI. -  if (!ConvOpc) { -    int CCDef = MI.findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI); -    assert(CCDef >= 0 && "Couldn't find CC set"); -    MI.getOperand(CCDef).setIsDead(false); -  } +  if (!ConvOpc) +    MI.clearRegisterDeads(SystemZ::CC);    // Check if MI lies before Compare.    
bool BeforeCmp = false; diff --git a/lib/Target/SystemZ/SystemZExpandPseudo.cpp b/lib/Target/SystemZ/SystemZExpandPseudo.cpp deleted file mode 100644 index 09708fb4241c..000000000000 --- a/lib/Target/SystemZ/SystemZExpandPseudo.cpp +++ /dev/null @@ -1,152 +0,0 @@ -//==-- SystemZExpandPseudo.cpp - Expand pseudo instructions -------*- C++ -*-=// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file contains a pass that expands pseudo instructions into target -// instructions to allow proper scheduling and other late optimizations.  This -// pass should be run after register allocation but before the post-regalloc -// scheduling pass. -// -//===----------------------------------------------------------------------===// - -#include "SystemZ.h" -#include "SystemZInstrInfo.h" -#include "SystemZSubtarget.h" -#include "llvm/CodeGen/LivePhysRegs.h" -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -using namespace llvm; - -#define SYSTEMZ_EXPAND_PSEUDO_NAME "SystemZ pseudo instruction expansion pass" - -namespace llvm { -  void initializeSystemZExpandPseudoPass(PassRegistry&); -} - -namespace { -class SystemZExpandPseudo : public MachineFunctionPass { -public: -  static char ID; -  SystemZExpandPseudo() : MachineFunctionPass(ID) { -    initializeSystemZExpandPseudoPass(*PassRegistry::getPassRegistry()); -  } - -  const SystemZInstrInfo *TII; - -  bool runOnMachineFunction(MachineFunction &Fn) override; - -  StringRef getPassName() const override { return SYSTEMZ_EXPAND_PSEUDO_NAME; } - -private: -  bool expandMBB(MachineBasicBlock &MBB); -  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, -                MachineBasicBlock::iterator &NextMBBI); -  bool expandLOCRMux(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, -                     MachineBasicBlock::iterator &NextMBBI); -}; -char SystemZExpandPseudo::ID = 0; -} - -INITIALIZE_PASS(SystemZExpandPseudo, "systemz-expand-pseudo", -                SYSTEMZ_EXPAND_PSEUDO_NAME, false, false) - -/// Returns an instance of the pseudo instruction expansion pass. -FunctionPass *llvm::createSystemZExpandPseudoPass(SystemZTargetMachine &TM) { -  return new SystemZExpandPseudo(); -} - -// MI is a load-register-on-condition pseudo instruction that could not be -// handled as a single hardware instruction.  Replace it by a branch sequence. -bool SystemZExpandPseudo::expandLOCRMux(MachineBasicBlock &MBB, -                                        MachineBasicBlock::iterator MBBI, -                                        MachineBasicBlock::iterator &NextMBBI) { -  MachineFunction &MF = *MBB.getParent(); -  const BasicBlock *BB = MBB.getBasicBlock(); -  MachineInstr &MI = *MBBI; -  DebugLoc DL = MI.getDebugLoc(); -  unsigned DestReg = MI.getOperand(0).getReg(); -  unsigned SrcReg = MI.getOperand(2).getReg(); -  unsigned CCValid = MI.getOperand(3).getImm(); -  unsigned CCMask = MI.getOperand(4).getImm(); - -  LivePhysRegs LiveRegs(TII->getRegisterInfo()); -  LiveRegs.addLiveOuts(MBB); -  for (auto I = std::prev(MBB.end()); I != MBBI; --I) -    LiveRegs.stepBackward(*I); - -  // Splice MBB at MI, moving the rest of the block into RestMBB. 
-  MachineBasicBlock *RestMBB = MF.CreateMachineBasicBlock(BB); -  MF.insert(std::next(MachineFunction::iterator(MBB)), RestMBB); -  RestMBB->splice(RestMBB->begin(), &MBB, MI, MBB.end()); -  RestMBB->transferSuccessors(&MBB); -  for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I) -    RestMBB->addLiveIn(*I); - -  // Create a new block MoveMBB to hold the move instruction. -  MachineBasicBlock *MoveMBB = MF.CreateMachineBasicBlock(BB); -  MF.insert(std::next(MachineFunction::iterator(MBB)), MoveMBB); -  MoveMBB->addLiveIn(SrcReg); -  for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I) -    MoveMBB->addLiveIn(*I); - -  // At the end of MBB, create a conditional branch to RestMBB if the -  // condition is false, otherwise fall through to MoveMBB. -  BuildMI(&MBB, DL, TII->get(SystemZ::BRC)) -    .addImm(CCValid).addImm(CCMask ^ CCValid).addMBB(RestMBB); -  MBB.addSuccessor(RestMBB); -  MBB.addSuccessor(MoveMBB); - -  // In MoveMBB, emit an instruction to move SrcReg into DestReg, -  // then fall through to RestMBB. -  TII->copyPhysReg(*MoveMBB, MoveMBB->end(), DL, DestReg, SrcReg, -                   MI.getOperand(2).isKill()); -  MoveMBB->addSuccessor(RestMBB); - -  NextMBBI = MBB.end(); -  MI.eraseFromParent(); -  return true; -} - -/// If MBBI references a pseudo instruction that should be expanded here, -/// do the expansion and return true.  Otherwise return false. -bool SystemZExpandPseudo::expandMI(MachineBasicBlock &MBB, -                                   MachineBasicBlock::iterator MBBI, -                                   MachineBasicBlock::iterator &NextMBBI) { -  MachineInstr &MI = *MBBI; -  switch (MI.getOpcode()) { -  case SystemZ::LOCRMux: -    return expandLOCRMux(MBB, MBBI, NextMBBI); -  default: -    break; -  } -  return false; -} - -/// Iterate over the instructions in basic block MBB and expand any -/// pseudo instructions.  Return true if anything was modified. -bool SystemZExpandPseudo::expandMBB(MachineBasicBlock &MBB) { -  bool Modified = false; - -  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end(); -  while (MBBI != E) { -    MachineBasicBlock::iterator NMBBI = std::next(MBBI); -    Modified |= expandMI(MBB, MBBI, NMBBI); -    MBBI = NMBBI; -  } - -  return Modified; -} - -bool SystemZExpandPseudo::runOnMachineFunction(MachineFunction &MF) { -  TII = static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo()); - -  bool Modified = false; -  for (auto &MBB : MF) -    Modified |= expandMBB(MBB); -  return Modified; -} - diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp index da28faebb326..0b8b6880accc 100644 --- a/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -46,8 +46,8 @@ static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {  } // end anonymous namespace  SystemZFrameLowering::SystemZFrameLowering() -    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8, -                          -SystemZMC::CallFrameSize, 8, +    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, Align(8), +                          -SystemZMC::CallFrameSize, Align(8),                            false /* StackRealignable */) {    // Create a mapping from register number to save slot offset.    
RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS); @@ -118,7 +118,7 @@ static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,                          unsigned GPR64, bool IsImplicit) {    const TargetRegisterInfo *RI =        MBB.getParent()->getSubtarget().getRegisterInfo(); -  unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32); +  Register GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);    bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);    if (!IsLive || !IsImplicit) {      MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive)); diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 9dc4512255cc..751034c2d41a 100644 --- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -346,6 +346,11 @@ public:        : SelectionDAGISel(TM, OptLevel) {}    bool runOnMachineFunction(MachineFunction &MF) override { +    const Function &F = MF.getFunction(); +    if (F.getFnAttribute("mnop-mcount").getValueAsString() == "true" && +        F.getFnAttribute("fentry-call").getValueAsString() != "true") +      report_fatal_error("mnop-mcount only supported with fentry-call"); +      Subtarget = &MF.getSubtarget<SystemZSubtarget>();      return SelectionDAGISel::runOnMachineFunction(MF);    } @@ -1146,7 +1151,7 @@ void SystemZDAGToDAGISel::loadVectorConstant(    SDLoc DL(Node);    SmallVector<SDValue, 2> Ops;    for (unsigned OpVal : VCI.OpVals) -    Ops.push_back(CurDAG->getConstant(OpVal, DL, MVT::i32)); +    Ops.push_back(CurDAG->getTargetConstant(OpVal, DL, MVT::i32));    SDValue Op = CurDAG->getNode(VCI.Opcode, DL, VCI.VecVT, Ops);    if (VCI.VecVT == VT.getSimpleVT()) @@ -1550,8 +1555,8 @@ void SystemZDAGToDAGISel::Select(SDNode *Node) {        uint64_t ConstCCMask =          cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();        // Invert the condition. -      CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node), -                                   CCMask.getValueType()); +      CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask, +                                         SDLoc(Node), CCMask.getValueType());        SDValue Op4 = Node->getOperand(4);        SDNode *UpdatedNode =          CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4); diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 78820f511ab4..e0ca9da93561 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);    // Instructions are strings of 2-byte aligned 2-byte values. -  setMinFunctionAlignment(2); +  setMinFunctionAlignment(Align(2));    // For performance reasons we prefer 16-byte alignment. -  setPrefFunctionAlignment(4); +  setPrefFunctionAlignment(Align(16));    // Handle operations that are handled in a similar way for all types.    for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; @@ -206,6 +206,12 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,        // the default expansion.        if (!Subtarget.hasFPExtension())          setOperationAction(ISD::FP_TO_UINT, VT, Expand); + +      // Mirror those settings for STRICT_FP_TO_[SU]INT.  Note that these all +      // default to Expand, so need to be modified to Legal where appropriate. 
+      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal); +      if (Subtarget.hasFPExtension()) +        setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);      }    } @@ -252,7 +258,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);    setOperationAction(ISD::CTLZ, MVT::i64, Legal); -  // On arch13 we have native support for a 64-bit CTPOP. +  // On z15 we have native support for a 64-bit CTPOP.    if (Subtarget.hasMiscellaneousExtensions3()) {      setOperationAction(ISD::CTPOP, MVT::i32, Promote);      setOperationAction(ISD::CTPOP, MVT::i64, Legal); @@ -294,14 +300,14 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,    // Handle prefetches with PFD or PFDRL.    setOperationAction(ISD::PREFETCH, MVT::Other, Custom); -  for (MVT VT : MVT::vector_valuetypes()) { +  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {      // Assume by default that all vector operations need to be expanded.      for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)        if (getOperationAction(Opcode, VT) == Legal)          setOperationAction(Opcode, VT, Expand);      // Likewise all truncating stores and extending loads. -    for (MVT InnerVT : MVT::vector_valuetypes()) { +    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {        setTruncStoreAction(VT, InnerVT, Expand);        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); @@ -327,7 +333,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,    }    // Handle integer vector types. -  for (MVT VT : MVT::integer_vector_valuetypes()) { +  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {      if (isTypeLegal(VT)) {        // These operations have direct equivalents.        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal); @@ -381,6 +387,11 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,      setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);      setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal); + +    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal); +    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal); +    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal); +    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);    }    if (Subtarget.hasVectorEnhancements2()) { @@ -392,6 +403,11 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,      setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);      setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);      setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal); + +    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal); +    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal); +    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal); +    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);    }    // Handle floating-point types. 
@@ -831,7 +847,7 @@ supportedAddressingMode(Instruction *I, bool HasVector) {    }    if (isa<LoadInst>(I) && I->hasOneUse()) { -    auto *SingleUser = dyn_cast<Instruction>(*I->user_begin()); +    auto *SingleUser = cast<Instruction>(*I->user_begin());      if (SingleUser->getParent() == I->getParent()) {        if (isa<ICmpInst>(SingleUser)) {          if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1))) @@ -956,7 +972,7 @@ SystemZTargetLowering::getConstraintType(StringRef Constraint) const {      case 'K': // Signed 16-bit constant      case 'L': // Signed 20-bit displacement (on all targets we support)      case 'M': // 0x7fffffff -      return C_Other; +      return C_Immediate;      default:        break; @@ -1335,7 +1351,7 @@ SDValue SystemZTargetLowering::LowerFormalArguments(          break;        } -      unsigned VReg = MRI.createVirtualRegister(RC); +      Register VReg = MRI.createVirtualRegister(RC);        MRI.addLiveIn(VA.getLocReg(), VReg);        ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);      } else { @@ -1430,7 +1446,7 @@ static bool canUseSiblingCall(const CCState &ArgCCInfo,        return false;      if (!VA.isRegLoc())        return false; -    unsigned Reg = VA.getLocReg(); +    Register Reg = VA.getLocReg();      if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)        return false;      if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError()) @@ -1674,7 +1690,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,      RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);      // Chain and glue the copies together. -    unsigned Reg = VA.getLocReg(); +    Register Reg = VA.getLocReg();      Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);      Glue = Chain.getValue(1);      RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT())); @@ -2533,12 +2549,12 @@ static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {    }    if (C.Opcode == SystemZISD::ICMP)      return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1, -                       DAG.getConstant(C.ICmpType, DL, MVT::i32)); +                       DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));    if (C.Opcode == SystemZISD::TM) {      bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=                           bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));      return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1, -                       DAG.getConstant(RegisterOnly, DL, MVT::i32)); +                       DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));    }    return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);  } @@ -2576,10 +2592,10 @@ static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,  // in CCValid, so other values can be ignored.  
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,                           unsigned CCValid, unsigned CCMask) { -  SDValue Ops[] = { DAG.getConstant(1, DL, MVT::i32), -                    DAG.getConstant(0, DL, MVT::i32), -                    DAG.getConstant(CCValid, DL, MVT::i32), -                    DAG.getConstant(CCMask, DL, MVT::i32), CCReg }; +  SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32), +                   DAG.getConstant(0, DL, MVT::i32), +                   DAG.getTargetConstant(CCValid, DL, MVT::i32), +                   DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};    return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);  } @@ -2741,9 +2757,10 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {    Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));    SDValue CCReg = emitCmp(DAG, DL, C); -  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), -                     Op.getOperand(0), DAG.getConstant(C.CCValid, DL, MVT::i32), -                     DAG.getConstant(C.CCMask, DL, MVT::i32), Dest, CCReg); +  return DAG.getNode( +      SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0), +      DAG.getTargetConstant(C.CCValid, DL, MVT::i32), +      DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);  }  // Return true if Pos is CmpOp and Neg is the negative of CmpOp, @@ -2794,8 +2811,9 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,    }    SDValue CCReg = emitCmp(DAG, DL, C); -  SDValue Ops[] = {TrueOp, FalseOp, DAG.getConstant(C.CCValid, DL, MVT::i32), -                   DAG.getConstant(C.CCMask, DL, MVT::i32), CCReg}; +  SDValue Ops[] = {TrueOp, FalseOp, +                   DAG.getTargetConstant(C.CCValid, DL, MVT::i32), +                   DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};    return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);  } @@ -3882,11 +3900,8 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,    bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();    unsigned Code = IsWrite ? 
SystemZ::PFD_WRITE : SystemZ::PFD_READ;    auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); -  SDValue Ops[] = { -    Op.getOperand(0), -    DAG.getConstant(Code, DL, MVT::i32), -    Op.getOperand(1) -  }; +  SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32), +                   Op.getOperand(1)};    return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,                                   Node->getVTList(), Ops,                                   Node->getMemoryVT(), Node->getMemOperand()); @@ -4228,7 +4243,7 @@ static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,    Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);    SDValue Op;    if (P.Opcode == SystemZISD::PERMUTE_DWORDS) { -    SDValue Op2 = DAG.getConstant(P.Operand, DL, MVT::i32); +    SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);      Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);    } else if (P.Opcode == SystemZISD::PACK) {      MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8), @@ -4253,7 +4268,8 @@ static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,    unsigned StartIndex, OpNo0, OpNo1;    if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))      return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0], -                       Ops[OpNo1], DAG.getConstant(StartIndex, DL, MVT::i32)); +                       Ops[OpNo1], +                       DAG.getTargetConstant(StartIndex, DL, MVT::i32));    // Fall back on VPERM.  Construct an SDNode for the permute vector.    SDValue IndexNodes[SystemZ::VectorBytes]; @@ -4751,7 +4767,7 @@ SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,        return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));      // Otherwise keep it as a vector-to-vector operation.      
return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), -                       DAG.getConstant(Index, DL, MVT::i32)); +                       DAG.getTargetConstant(Index, DL, MVT::i32));    }    GeneralShuffle GS(VT); @@ -6041,8 +6057,8 @@ SDValue SystemZTargetLowering::combineBR_CCMASK(    if (combineCCMask(CCReg, CCValidVal, CCMaskVal))      return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),                         Chain, -                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), -                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), +                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32), +                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),                         N->getOperand(3), CCReg);    return SDValue();  } @@ -6063,10 +6079,9 @@ SDValue SystemZTargetLowering::combineSELECT_CCMASK(    if (combineCCMask(CCReg, CCValidVal, CCMaskVal))      return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), -                       N->getOperand(0), -                       N->getOperand(1), -                       DAG.getConstant(CCValidVal, SDLoc(N), MVT::i32), -                       DAG.getConstant(CCMaskVal, SDLoc(N), MVT::i32), +                       N->getOperand(0), N->getOperand(1), +                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32), +                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),                         CCReg);    return SDValue();  } @@ -6548,19 +6563,17 @@ static bool isSelectPseudo(MachineInstr &MI) {  // Helper function, which inserts PHI functions into SinkMBB:  //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ], -// where %FalseValue(i) and %TrueValue(i) are taken from the consequent Selects -// in [MIItBegin, MIItEnd) range. -static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin, -                                 MachineBasicBlock::iterator MIItEnd, +// where %FalseValue(i) and %TrueValue(i) are taken from Selects. +static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,                                   MachineBasicBlock *TrueMBB,                                   MachineBasicBlock *FalseMBB,                                   MachineBasicBlock *SinkMBB) {    MachineFunction *MF = TrueMBB->getParent();    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); -  unsigned CCValid = MIItBegin->getOperand(3).getImm(); -  unsigned CCMask = MIItBegin->getOperand(4).getImm(); -  DebugLoc DL = MIItBegin->getDebugLoc(); +  MachineInstr *FirstMI = Selects.front(); +  unsigned CCValid = FirstMI->getOperand(3).getImm(); +  unsigned CCMask = FirstMI->getOperand(4).getImm();    MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin(); @@ -6572,16 +6585,15 @@ static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,    // destination registers, and the registers that went into the PHI.    
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable; -  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; -       MIIt = skipDebugInstructionsForward(++MIIt, MIItEnd)) { -    unsigned DestReg = MIIt->getOperand(0).getReg(); -    unsigned TrueReg = MIIt->getOperand(1).getReg(); -    unsigned FalseReg = MIIt->getOperand(2).getReg(); +  for (auto MI : Selects) { +    Register DestReg = MI->getOperand(0).getReg(); +    Register TrueReg = MI->getOperand(1).getReg(); +    Register FalseReg = MI->getOperand(2).getReg();      // If this Select we are generating is the opposite condition from      // the jump we generated, then we have to swap the operands for the      // PHI that is going to be generated. -    if (MIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) +    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))        std::swap(TrueReg, FalseReg);      if (RegRewriteTable.find(TrueReg) != RegRewriteTable.end()) @@ -6590,6 +6602,7 @@ static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,      if (RegRewriteTable.find(FalseReg) != RegRewriteTable.end())        FalseReg = RegRewriteTable[FalseReg].second; +    DebugLoc DL = MI->getDebugLoc();      BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)        .addReg(TrueReg).addMBB(TrueMBB)        .addReg(FalseReg).addMBB(FalseMBB); @@ -6605,36 +6618,61 @@ static void createPHIsForSelects(MachineBasicBlock::iterator MIItBegin,  MachineBasicBlock *  SystemZTargetLowering::emitSelect(MachineInstr &MI,                                    MachineBasicBlock *MBB) const { +  assert(isSelectPseudo(MI) && "Bad call to emitSelect()");    const SystemZInstrInfo *TII =        static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());    unsigned CCValid = MI.getOperand(3).getImm();    unsigned CCMask = MI.getOperand(4).getImm(); -  DebugLoc DL = MI.getDebugLoc();    // If we have a sequence of Select* pseudo instructions using the    // same condition code value, we want to expand all of them into    // a single pair of basic blocks using the same condition. 
-  MachineInstr *LastMI = &MI; -  MachineBasicBlock::iterator NextMIIt = skipDebugInstructionsForward( -      std::next(MachineBasicBlock::iterator(MI)), MBB->end()); - -  if (isSelectPseudo(MI)) -    while (NextMIIt != MBB->end() && isSelectPseudo(*NextMIIt) && -           NextMIIt->getOperand(3).getImm() == CCValid && -           (NextMIIt->getOperand(4).getImm() == CCMask || -            NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask))) { -      LastMI = &*NextMIIt; -      NextMIIt = skipDebugInstructionsForward(++NextMIIt, MBB->end()); +  SmallVector<MachineInstr*, 8> Selects; +  SmallVector<MachineInstr*, 8> DbgValues; +  Selects.push_back(&MI); +  unsigned Count = 0; +  for (MachineBasicBlock::iterator NextMIIt = +         std::next(MachineBasicBlock::iterator(MI)); +       NextMIIt != MBB->end(); ++NextMIIt) { +    if (NextMIIt->definesRegister(SystemZ::CC)) +      break; +    if (isSelectPseudo(*NextMIIt)) { +      assert(NextMIIt->getOperand(3).getImm() == CCValid && +             "Bad CCValid operands since CC was not redefined."); +      if (NextMIIt->getOperand(4).getImm() == CCMask || +          NextMIIt->getOperand(4).getImm() == (CCValid ^ CCMask)) { +        Selects.push_back(&*NextMIIt); +        continue; +      } +      break;      } +    bool User = false; +    for (auto SelMI : Selects) +      if (NextMIIt->readsVirtualRegister(SelMI->getOperand(0).getReg())) { +        User = true; +        break; +      } +    if (NextMIIt->isDebugInstr()) { +      if (User) { +        assert(NextMIIt->isDebugValue() && "Unhandled debug opcode."); +        DbgValues.push_back(&*NextMIIt); +      } +    } +    else if (User || ++Count > 20) +      break; +  } +  MachineInstr *LastMI = Selects.back(); +  bool CCKilled = +      (LastMI->killsRegister(SystemZ::CC) || checkCCKill(*LastMI, MBB));    MachineBasicBlock *StartMBB = MBB; -  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB); +  MachineBasicBlock *JoinMBB  = splitBlockAfter(LastMI, MBB);    MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);    // Unless CC was killed in the last Select instruction, mark it as    // live-in to both FalseMBB and JoinMBB. -  if (!LastMI->killsRegister(SystemZ::CC) && !checkCCKill(*LastMI, JoinMBB)) { +  if (!CCKilled) {      FalseMBB->addLiveIn(SystemZ::CC);      JoinMBB->addLiveIn(SystemZ::CC);    } @@ -6643,7 +6681,7 @@ SystemZTargetLowering::emitSelect(MachineInstr &MI,    //   BRC CCMask, JoinMBB    //   # fallthrough to FalseMBB    MBB = StartMBB; -  BuildMI(MBB, DL, TII->get(SystemZ::BRC)) +  BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC))      .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);    MBB->addSuccessor(JoinMBB);    MBB->addSuccessor(FalseMBB); @@ -6657,12 +6695,14 @@ SystemZTargetLowering::emitSelect(MachineInstr &MI,    //   %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]    //  ...    
MBB = JoinMBB; -  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI); -  MachineBasicBlock::iterator MIItEnd = skipDebugInstructionsForward( -      std::next(MachineBasicBlock::iterator(LastMI)), MBB->end()); -  createPHIsForSelects(MIItBegin, MIItEnd, StartMBB, FalseMBB, MBB); +  createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB); +  for (auto SelMI : Selects) +    SelMI->eraseFromParent(); + +  MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI(); +  for (auto DbgMI : DbgValues) +    MBB->splice(InsertPos, StartMBB, DbgMI); -  StartMBB->erase(MIItBegin, MIItEnd);    return JoinMBB;  } @@ -6678,10 +6718,10 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,    const SystemZInstrInfo *TII =        static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); -  unsigned SrcReg = MI.getOperand(0).getReg(); +  Register SrcReg = MI.getOperand(0).getReg();    MachineOperand Base = MI.getOperand(1);    int64_t Disp = MI.getOperand(2).getImm(); -  unsigned IndexReg = MI.getOperand(3).getReg(); +  Register IndexReg = MI.getOperand(3).getReg();    unsigned CCValid = MI.getOperand(4).getImm();    unsigned CCMask = MI.getOperand(5).getImm();    DebugLoc DL = MI.getDebugLoc(); @@ -6773,7 +6813,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(    // Extract the operands.  Base can be a register or a frame index.    // Src2 can be a register or immediate. -  unsigned Dest = MI.getOperand(0).getReg(); +  Register Dest = MI.getOperand(0).getReg();    MachineOperand Base = earlyUseOperand(MI.getOperand(1));    int64_t Disp = MI.getOperand(2).getImm();    MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); @@ -6833,7 +6873,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(        .addReg(OldVal).addReg(BitShift).addImm(0);    if (Invert) {      // Perform the operation normally and then invert every bit of the field. -    unsigned Tmp = MRI.createVirtualRegister(RC); +    Register Tmp = MRI.createVirtualRegister(RC);      BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);      if (BitSize <= 32)        // XILF with the upper BitSize bits set. @@ -6842,7 +6882,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(      else {        // Use LCGR and add -1 to the result, which is more compact than        // an XILF, XILH pair. -      unsigned Tmp2 = MRI.createVirtualRegister(RC); +      Register Tmp2 = MRI.createVirtualRegister(RC);        BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);        BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)          .addReg(Tmp2).addImm(-1); @@ -6891,7 +6931,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(    bool IsSubWord = (BitSize < 32);    // Extract the operands.  Base can be a register or a frame index. -  unsigned Dest = MI.getOperand(0).getReg(); +  Register Dest = MI.getOperand(0).getReg();    MachineOperand Base = earlyUseOperand(MI.getOperand(1));    int64_t Disp = MI.getOperand(2).getImm();    Register Src2 = MI.getOperand(3).getReg(); @@ -7005,13 +7045,13 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,    MachineRegisterInfo &MRI = MF.getRegInfo();    // Extract the operands.  Base can be a register or a frame index. 
-  unsigned Dest = MI.getOperand(0).getReg(); +  Register Dest = MI.getOperand(0).getReg();    MachineOperand Base = earlyUseOperand(MI.getOperand(1));    int64_t Disp = MI.getOperand(2).getImm(); -  unsigned OrigCmpVal = MI.getOperand(3).getReg(); -  unsigned OrigSwapVal = MI.getOperand(4).getReg(); -  unsigned BitShift = MI.getOperand(5).getReg(); -  unsigned NegBitShift = MI.getOperand(6).getReg(); +  Register OrigCmpVal = MI.getOperand(3).getReg(); +  Register OrigSwapVal = MI.getOperand(4).getReg(); +  Register BitShift = MI.getOperand(5).getReg(); +  Register NegBitShift = MI.getOperand(6).getReg();    int64_t BitSize = MI.getOperand(7).getImm();    DebugLoc DL = MI.getDebugLoc(); @@ -7023,14 +7063,14 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,    assert(LOpcode && CSOpcode && "Displacement out of range");    // Create virtual registers for temporary results. -  unsigned OrigOldVal   = MRI.createVirtualRegister(RC); -  unsigned OldVal       = MRI.createVirtualRegister(RC); -  unsigned CmpVal       = MRI.createVirtualRegister(RC); -  unsigned SwapVal      = MRI.createVirtualRegister(RC); -  unsigned StoreVal     = MRI.createVirtualRegister(RC); -  unsigned RetryOldVal  = MRI.createVirtualRegister(RC); -  unsigned RetryCmpVal  = MRI.createVirtualRegister(RC); -  unsigned RetrySwapVal = MRI.createVirtualRegister(RC); +  Register OrigOldVal = MRI.createVirtualRegister(RC); +  Register OldVal = MRI.createVirtualRegister(RC); +  Register CmpVal = MRI.createVirtualRegister(RC); +  Register SwapVal = MRI.createVirtualRegister(RC); +  Register StoreVal = MRI.createVirtualRegister(RC); +  Register RetryOldVal = MRI.createVirtualRegister(RC); +  Register RetryCmpVal = MRI.createVirtualRegister(RC); +  Register RetrySwapVal = MRI.createVirtualRegister(RC);    // Insert 2 basic blocks for the loop.    
MachineBasicBlock *StartMBB = MBB; @@ -7129,11 +7169,11 @@ SystemZTargetLowering::emitPair128(MachineInstr &MI,    MachineRegisterInfo &MRI = MF.getRegInfo();    DebugLoc DL = MI.getDebugLoc(); -  unsigned Dest = MI.getOperand(0).getReg(); -  unsigned Hi = MI.getOperand(1).getReg(); -  unsigned Lo = MI.getOperand(2).getReg(); -  unsigned Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); -  unsigned Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); +  Register Dest = MI.getOperand(0).getReg(); +  Register Hi = MI.getOperand(1).getReg(); +  Register Lo = MI.getOperand(2).getReg(); +  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); +  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Tmp1);    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Tmp2) @@ -7157,14 +7197,14 @@ MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,    MachineRegisterInfo &MRI = MF.getRegInfo();    DebugLoc DL = MI.getDebugLoc(); -  unsigned Dest = MI.getOperand(0).getReg(); -  unsigned Src = MI.getOperand(1).getReg(); -  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); +  Register Dest = MI.getOperand(0).getReg(); +  Register Src = MI.getOperand(1).getReg(); +  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);    if (ClearEven) { -    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); -    unsigned Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); +    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); +    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);      BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)        .addImm(0); @@ -7308,7 +7348,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(      // The previous iteration might have created out-of-range displacements.      // Apply them using LAY if so.      if (!isUInt<12>(DestDisp)) { -      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); +      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);        BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)            .add(DestBase)            .addImm(DestDisp) @@ -7317,7 +7357,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(        DestDisp = 0;      }      if (!isUInt<12>(SrcDisp)) { -      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); +      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);        BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)            .add(SrcBase)            .addImm(SrcDisp) @@ -7474,11 +7514,11 @@ MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(        static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());    DebugLoc DL = MI.getDebugLoc(); -  unsigned SrcReg = MI.getOperand(0).getReg(); +  Register SrcReg = MI.getOperand(0).getReg();    // Create new virtual register of the same class as source.    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); -  unsigned DstReg = MRI->createVirtualRegister(RC); +  Register DstReg = MRI->createVirtualRegister(RC);    // Replace pseudo with a normal load-and-test that models the def as    // well. 
diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td index 19c7ec58ed3d..9c95e8aec940 100644 --- a/lib/Target/SystemZ/SystemZInstrFP.td +++ b/lib/Target/SystemZ/SystemZInstrFP.td @@ -25,10 +25,10 @@ let Predicates = [FeatureNoVectorEnhancements1] in  let Predicates = [FeatureVectorEnhancements1] in    def SelectVR128 : SelectWrapper<f128, VR128>; -defm CondStoreF32 : CondStores<FP32, nonvolatile_store, -                               nonvolatile_load, bdxaddr20only>; -defm CondStoreF64 : CondStores<FP64, nonvolatile_store, -                               nonvolatile_load, bdxaddr20only>; +defm CondStoreF32 : CondStores<FP32, simple_store, +                               simple_load, bdxaddr20only>; +defm CondStoreF64 : CondStores<FP64, simple_store, +                               simple_load, bdxaddr20only>;  //===----------------------------------------------------------------------===//  // Move instructions @@ -276,13 +276,13 @@ let Uses = [FPC], mayRaiseFPException = 1, Defs = [CC] in {  }  // fp_to_sint always rounds towards zero, which is modifier value 5. -def : Pat<(i32 (fp_to_sint FP32:$src)),  (CFEBR 5, FP32:$src)>; -def : Pat<(i32 (fp_to_sint FP64:$src)),  (CFDBR 5, FP64:$src)>; -def : Pat<(i32 (fp_to_sint FP128:$src)), (CFXBR 5, FP128:$src)>; +def : Pat<(i32 (any_fp_to_sint FP32:$src)),  (CFEBR 5, FP32:$src)>; +def : Pat<(i32 (any_fp_to_sint FP64:$src)),  (CFDBR 5, FP64:$src)>; +def : Pat<(i32 (any_fp_to_sint FP128:$src)), (CFXBR 5, FP128:$src)>; -def : Pat<(i64 (fp_to_sint FP32:$src)),  (CGEBR 5, FP32:$src)>; -def : Pat<(i64 (fp_to_sint FP64:$src)),  (CGDBR 5, FP64:$src)>; -def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>; +def : Pat<(i64 (any_fp_to_sint FP32:$src)),  (CGEBR 5, FP32:$src)>; +def : Pat<(i64 (any_fp_to_sint FP64:$src)),  (CGDBR 5, FP64:$src)>; +def : Pat<(i64 (any_fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;  // The FP extension feature provides versions of the above that allow  // also specifying the inexact-exception suppression flag. 
@@ -309,13 +309,13 @@ let Predicates = [FeatureFPExtension] in {      def CLGXBR : TernaryRRFe<"clgxbr", 0xB3AE, GR64, FP128>;    } -  def : Pat<(i32 (fp_to_uint FP32:$src)),  (CLFEBR 5, FP32:$src,  0)>; -  def : Pat<(i32 (fp_to_uint FP64:$src)),  (CLFDBR 5, FP64:$src,  0)>; -  def : Pat<(i32 (fp_to_uint FP128:$src)), (CLFXBR 5, FP128:$src, 0)>; +  def : Pat<(i32 (any_fp_to_uint FP32:$src)),  (CLFEBR 5, FP32:$src,  0)>; +  def : Pat<(i32 (any_fp_to_uint FP64:$src)),  (CLFDBR 5, FP64:$src,  0)>; +  def : Pat<(i32 (any_fp_to_uint FP128:$src)), (CLFXBR 5, FP128:$src, 0)>; -  def : Pat<(i64 (fp_to_uint FP32:$src)),  (CLGEBR 5, FP32:$src,  0)>; -  def : Pat<(i64 (fp_to_uint FP64:$src)),  (CLGDBR 5, FP64:$src,  0)>; -  def : Pat<(i64 (fp_to_uint FP128:$src)), (CLGXBR 5, FP128:$src, 0)>; +  def : Pat<(i64 (any_fp_to_uint FP32:$src)),  (CLGEBR 5, FP32:$src,  0)>; +  def : Pat<(i64 (any_fp_to_uint FP64:$src)),  (CLGDBR 5, FP64:$src,  0)>; +  def : Pat<(i64 (any_fp_to_uint FP128:$src)), (CLGXBR 5, FP128:$src, 0)>;  } diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td index 2a1d14de3ddf..c9dbe3da686d 100644 --- a/lib/Target/SystemZ/SystemZInstrFormats.td +++ b/lib/Target/SystemZ/SystemZInstrFormats.td @@ -2141,17 +2141,17 @@ class FixedCondBranchRXY<CondVariant V, string mnemonic, bits<16> opcode,  }  class CmpBranchRIEa<string mnemonic, bits<16> opcode, -                    RegisterOperand cls, Immediate imm> +                    RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEa<opcode, (outs), (ins cls:$R1, imm:$I2, cond4:$M3),               mnemonic#"$M3\t$R1, $I2", []>;  class AsmCmpBranchRIEa<string mnemonic, bits<16> opcode, -                       RegisterOperand cls, Immediate imm> +                       RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEa<opcode, (outs), (ins cls:$R1, imm:$I2, imm32zx4:$M3),               mnemonic#"\t$R1, $I2, $M3", []>;  class FixedCmpBranchRIEa<CondVariant V, string mnemonic, bits<16> opcode, -                          RegisterOperand cls, Immediate imm> +                          RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEa<opcode, (outs), (ins cls:$R1, imm:$I2),               mnemonic#V.suffix#"\t$R1, $I2", []> {    let isAsmParserOnly = V.alternate; @@ -2159,7 +2159,7 @@ class FixedCmpBranchRIEa<CondVariant V, string mnemonic, bits<16> opcode,  }  multiclass CmpBranchRIEaPair<string mnemonic, bits<16> opcode, -                             RegisterOperand cls, Immediate imm> { +                             RegisterOperand cls, ImmOpWithPattern imm> {    let isCodeGenOnly = 1 in      def "" : CmpBranchRIEa<mnemonic, opcode, cls, imm>;    def Asm : AsmCmpBranchRIEa<mnemonic, opcode, cls, imm>; @@ -2193,19 +2193,19 @@ multiclass CmpBranchRIEbPair<string mnemonic, bits<16> opcode,  }  class CmpBranchRIEc<string mnemonic, bits<16> opcode, -                    RegisterOperand cls, Immediate imm> +                    RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEc<opcode, (outs),               (ins cls:$R1, imm:$I2, cond4:$M3, brtarget16:$RI4),               mnemonic#"$M3\t$R1, $I2, $RI4", []>;  class AsmCmpBranchRIEc<string mnemonic, bits<16> opcode, -                       RegisterOperand cls, Immediate imm> +                       RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEc<opcode, (outs),               (ins cls:$R1, imm:$I2, imm32zx4:$M3, brtarget16:$RI4),               mnemonic#"\t$R1, $I2, $M3, $RI4", []>;  class FixedCmpBranchRIEc<CondVariant V, string mnemonic, 
bits<16> opcode, -                         RegisterOperand cls, Immediate imm> +                         RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEc<opcode, (outs), (ins cls:$R1, imm:$I2, brtarget16:$RI4),               mnemonic#V.suffix#"\t$R1, $I2, $RI4", []> {    let isAsmParserOnly = V.alternate; @@ -2213,7 +2213,7 @@ class FixedCmpBranchRIEc<CondVariant V, string mnemonic, bits<16> opcode,  }  multiclass CmpBranchRIEcPair<string mnemonic, bits<16> opcode, -                            RegisterOperand cls, Immediate imm> { +                            RegisterOperand cls, ImmOpWithPattern imm> {    let isCodeGenOnly = 1 in      def "" : CmpBranchRIEc<mnemonic, opcode, cls, imm>;    def Asm : AsmCmpBranchRIEc<mnemonic, opcode, cls, imm>; @@ -2272,19 +2272,19 @@ multiclass CmpBranchRRSPair<string mnemonic, bits<16> opcode,  }  class CmpBranchRIS<string mnemonic, bits<16> opcode, -                   RegisterOperand cls, Immediate imm> +                   RegisterOperand cls, ImmOpWithPattern imm>    : InstRIS<opcode, (outs),              (ins cls:$R1, imm:$I2, cond4:$M3, bdaddr12only:$BD4),              mnemonic#"$M3\t$R1, $I2, $BD4", []>;  class AsmCmpBranchRIS<string mnemonic, bits<16> opcode, -                      RegisterOperand cls, Immediate imm> +                      RegisterOperand cls, ImmOpWithPattern imm>    : InstRIS<opcode, (outs),              (ins cls:$R1, imm:$I2, imm32zx4:$M3, bdaddr12only:$BD4),              mnemonic#"\t$R1, $I2, $M3, $BD4", []>;  class FixedCmpBranchRIS<CondVariant V, string mnemonic, bits<16> opcode, -                        RegisterOperand cls, Immediate imm> +                        RegisterOperand cls, ImmOpWithPattern imm>    : InstRIS<opcode, (outs), (ins cls:$R1, imm:$I2, bdaddr12only:$BD4),              mnemonic#V.suffix#"\t$R1, $I2, $BD4", []> {    let isAsmParserOnly = V.alternate; @@ -2292,7 +2292,7 @@ class FixedCmpBranchRIS<CondVariant V, string mnemonic, bits<16> opcode,  }  multiclass CmpBranchRISPair<string mnemonic, bits<16> opcode, -                            RegisterOperand cls, Immediate imm> { +                            RegisterOperand cls, ImmOpWithPattern imm> {    let isCodeGenOnly = 1 in      def "" : CmpBranchRIS<mnemonic, opcode, cls, imm>;    def Asm : AsmCmpBranchRIS<mnemonic, opcode, cls, imm>; @@ -2585,7 +2585,7 @@ multiclass StoreMultipleVRSaAlign<string mnemonic, bits<16> opcode> {  // We therefore match the address in the same way as a normal store and  // only use the StoreSI* instruction if the matched address is suitable.  
class StoreSI<string mnemonic, bits<8> opcode, SDPatternOperator operator, -              Immediate imm> +              ImmOpWithPattern imm>    : InstSI<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),             mnemonic#"\t$BD1, $I2",             [(operator imm:$I2, mviaddr12pair:$BD1)]> { @@ -2593,7 +2593,7 @@ class StoreSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,  }  class StoreSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator, -               Immediate imm> +               ImmOpWithPattern imm>    : InstSIY<opcode, (outs), (ins mviaddr20pair:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2",              [(operator imm:$I2, mviaddr20pair:$BD1)]> { @@ -2601,7 +2601,7 @@ class StoreSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  class StoreSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator, -               Immediate imm> +               ImmOpWithPattern imm>    : InstSIL<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2",              [(operator imm:$I2, mviaddr12pair:$BD1)]> { @@ -2609,7 +2609,7 @@ class StoreSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  multiclass StoreSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode, -                       SDPatternOperator operator, Immediate imm> { +                       SDPatternOperator operator, ImmOpWithPattern imm> {    let DispKey = mnemonic in {      let DispSize = "12" in        def "" : StoreSI<mnemonic, siOpcode, operator, imm>; @@ -2665,7 +2665,7 @@ multiclass CondStoreRSYPair<string mnemonic, bits<16> opcode,    def Asm : AsmCondStoreRSY<mnemonic, opcode, cls, bytes, mode>;  } -class SideEffectUnaryI<string mnemonic, bits<8> opcode, Immediate imm> +class SideEffectUnaryI<string mnemonic, bits<8> opcode, ImmOpWithPattern imm>    : InstI<opcode, (outs), (ins imm:$I1),            mnemonic#"\t$I1", []>; @@ -2761,13 +2761,13 @@ class UnaryMemRRFc<string mnemonic, bits<16> opcode,  }  class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, -              RegisterOperand cls, Immediate imm> +              RegisterOperand cls, ImmOpWithPattern imm>    : InstRIa<opcode, (outs cls:$R1), (ins imm:$I2),              mnemonic#"\t$R1, $I2",              [(set cls:$R1, (operator imm:$I2))]>;  class UnaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, -               RegisterOperand cls, Immediate imm> +               RegisterOperand cls, ImmOpWithPattern imm>    : InstRILa<opcode, (outs cls:$R1), (ins imm:$I2),               mnemonic#"\t$R1, $I2",               [(set cls:$R1, (operator imm:$I2))]>; @@ -2885,14 +2885,14 @@ multiclass UnaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,  }  class UnaryVRIa<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                TypedReg tr, Immediate imm, bits<4> type = 0> +                TypedReg tr, ImmOpWithPattern imm, bits<4> type = 0>    : InstVRIa<opcode, (outs tr.op:$V1), (ins imm:$I2),               mnemonic#"\t$V1, $I2", -             [(set (tr.vt tr.op:$V1), (operator imm:$I2))]> { +             [(set (tr.vt tr.op:$V1), (operator (i32 timm:$I2)))]> {    let M3 = type;  } -class UnaryVRIaGeneric<string mnemonic, bits<16> opcode, Immediate imm> +class UnaryVRIaGeneric<string mnemonic, bits<16> opcode, ImmOpWithPattern imm>    : InstVRIa<opcode, (outs VR128:$V1), (ins imm:$I2, imm32zx4:$M3),               mnemonic#"\t$V1, $I2, $M3", []>; @@ -3021,7 +3021,7 @@ class 
SideEffectBinaryRRFc<string mnemonic, bits<16> opcode,  }  class SideEffectBinaryIE<string mnemonic, bits<16> opcode, -                         Immediate imm1, Immediate imm2> +                         ImmOpWithPattern imm1, ImmOpWithPattern imm2>    : InstIE<opcode, (outs), (ins imm1:$I1, imm2:$I2),             mnemonic#"\t$I1, $I2", []>; @@ -3030,7 +3030,7 @@ class SideEffectBinarySI<string mnemonic, bits<8> opcode, Operand imm>             mnemonic#"\t$BD1, $I2", []>;  class SideEffectBinarySIL<string mnemonic, bits<16> opcode, -                          SDPatternOperator operator, Immediate imm> +                          SDPatternOperator operator, ImmOpWithPattern imm>    : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2", [(operator bdaddr12only:$BD1, imm:$I2)]>; @@ -3165,7 +3165,7 @@ class BinaryRRFc<string mnemonic, bits<16> opcode,               mnemonic#"\t$R1, $R2, $M3", []>;  class BinaryMemRRFc<string mnemonic, bits<16> opcode, -                    RegisterOperand cls1, RegisterOperand cls2, Immediate imm> +                    RegisterOperand cls1, RegisterOperand cls2, ImmOpWithPattern imm>    : InstRRFc<opcode, (outs cls2:$R2, cls1:$R1), (ins cls1:$R1src, imm:$M3),              mnemonic#"\t$R1, $R2, $M3", []> {    let Constraints = "$R1 = $R1src"; @@ -3267,7 +3267,7 @@ multiclass CondBinaryRRFaPair<string mnemonic, bits<16> opcode,  }  class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, -               RegisterOperand cls, Immediate imm> +               RegisterOperand cls, ImmOpWithPattern imm>    : InstRIa<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),              mnemonic#"\t$R1, $I2",              [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> { @@ -3276,14 +3276,14 @@ class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,  }  class BinaryRIE<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                RegisterOperand cls, Immediate imm> +                RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEd<opcode, (outs cls:$R1), (ins cls:$R3, imm:$I2),               mnemonic#"\t$R1, $R3, $I2",               [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;  multiclass BinaryRIAndK<string mnemonic, bits<12> opcode1, bits<16> opcode2,                          SDPatternOperator operator, RegisterOperand cls, -                        Immediate imm> { +                        ImmOpWithPattern imm> {    let NumOpsKey = mnemonic in {      let NumOpsValue = "3" in        def K : BinaryRIE<mnemonic##"k", opcode2, operator, cls, imm>, @@ -3294,7 +3294,7 @@ multiclass BinaryRIAndK<string mnemonic, bits<12> opcode1, bits<16> opcode2,  }  class CondBinaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls, -                    Immediate imm> +                    ImmOpWithPattern imm>    : InstRIEg<opcode, (outs cls:$R1),               (ins cls:$R1src, imm:$I2, cond4:$valid, cond4:$M3),               mnemonic#"$M3\t$R1, $I2", @@ -3308,7 +3308,7 @@ class CondBinaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls,  // Like CondBinaryRIE, but used for the raw assembly form.  The condition-code  // mask is the third operand rather than being part of the mnemonic.  
class AsmCondBinaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls, -                       Immediate imm> +                       ImmOpWithPattern imm>    : InstRIEg<opcode, (outs cls:$R1),               (ins cls:$R1src, imm:$I2, imm32zx4:$M3),               mnemonic#"\t$R1, $I2, $M3", []> { @@ -3318,7 +3318,7 @@ class AsmCondBinaryRIE<string mnemonic, bits<16> opcode, RegisterOperand cls,  // Like CondBinaryRIE, but with a fixed CC mask.  class FixedCondBinaryRIE<CondVariant V, string mnemonic, bits<16> opcode, -                         RegisterOperand cls, Immediate imm> +                         RegisterOperand cls, ImmOpWithPattern imm>    : InstRIEg<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),               mnemonic#V.suffix#"\t$R1, $I2", []> {    let Constraints = "$R1 = $R1src"; @@ -3328,14 +3328,14 @@ class FixedCondBinaryRIE<CondVariant V, string mnemonic, bits<16> opcode,  }  multiclass CondBinaryRIEPair<string mnemonic, bits<16> opcode, -                             RegisterOperand cls, Immediate imm> { +                             RegisterOperand cls, ImmOpWithPattern imm> {    let isCodeGenOnly = 1 in      def "" : CondBinaryRIE<mnemonic, opcode, cls, imm>;    def Asm : AsmCondBinaryRIE<mnemonic, opcode, cls, imm>;  }  class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, -                RegisterOperand cls, Immediate imm> +                RegisterOperand cls, ImmOpWithPattern imm>    : InstRILa<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),               mnemonic#"\t$R1, $I2",               [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> { @@ -3484,7 +3484,7 @@ class BinaryVRIb<string mnemonic, bits<16> opcode, SDPatternOperator operator,                   TypedReg tr, bits<4> type>    : InstVRIb<opcode, (outs tr.op:$V1), (ins imm32zx8:$I2, imm32zx8:$I3),               mnemonic#"\t$V1, $I2, $I3", -             [(set (tr.vt tr.op:$V1), (operator imm32zx8:$I2, imm32zx8:$I3))]> { +             [(set (tr.vt tr.op:$V1), (operator imm32zx8_timm:$I2, imm32zx8_timm:$I3))]> {    let M4 = type;  } @@ -3498,7 +3498,7 @@ class BinaryVRIc<string mnemonic, bits<16> opcode, SDPatternOperator operator,    : InstVRIc<opcode, (outs tr1.op:$V1), (ins tr2.op:$V3, imm32zx16:$I2),               mnemonic#"\t$V1, $V3, $I2",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V3), -                                                  imm32zx16:$I2))]> { +                                                  imm32zx16_timm:$I2))]> {    let M4 = type;  } @@ -3512,7 +3512,7 @@ class BinaryVRIe<string mnemonic, bits<16> opcode, SDPatternOperator operator,    : InstVRIe<opcode, (outs tr1.op:$V1), (ins tr2.op:$V2, imm32zx12:$I3),               mnemonic#"\t$V1, $V2, $I3",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V2), -                                                  imm32zx12:$I3))]> { +                                                  imm32zx12_timm:$I3))]> {    let M4 = type;    let M5 = m5;  } @@ -3715,7 +3715,7 @@ class BinaryVRX<string mnemonic, bits<16> opcode, SDPatternOperator operator,    : InstVRX<opcode, (outs VR128:$V1), (ins bdxaddr12only:$XBD2, imm32zx4:$M3),              mnemonic#"\t$V1, $XBD2, $M3",              [(set (tr.vt tr.op:$V1), (operator bdxaddr12only:$XBD2, -                                               imm32zx4:$M3))]> { +                                               imm32zx4_timm:$M3))]> {    let mayLoad = 1;    let AccessBytes = bytes;  } @@ -3765,7 +3765,7 @@ class BinaryVSI<string mnemonic, 
bits<16> opcode, SDPatternOperator operator,  }  class StoreBinaryVRV<string mnemonic, bits<16> opcode, bits<5> bytes, -                     Immediate index> +                     ImmOpWithPattern index>    : InstVRV<opcode, (outs), (ins VR128:$V1, bdvaddr12only:$VBD2, index:$M3),              mnemonic#"\t$V1, $VBD2, $M3", []> {    let mayStore = 1; @@ -3774,7 +3774,7 @@ class StoreBinaryVRV<string mnemonic, bits<16> opcode, bits<5> bytes,  class StoreBinaryVRX<string mnemonic, bits<16> opcode,                       SDPatternOperator operator, TypedReg tr, bits<5> bytes, -                     Immediate index> +                     ImmOpWithPattern index>    : InstVRX<opcode, (outs), (ins tr.op:$V1, bdxaddr12only:$XBD2, index:$M3),              mnemonic#"\t$V1, $XBD2, $M3",              [(operator (tr.vt tr.op:$V1), bdxaddr12only:$XBD2, index:$M3)]> { @@ -3809,7 +3809,7 @@ class CompareRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, -                RegisterOperand cls, Immediate imm> +                RegisterOperand cls, ImmOpWithPattern imm>    : InstRIa<opcode, (outs), (ins cls:$R1, imm:$I2),              mnemonic#"\t$R1, $I2",              [(set CC, (operator cls:$R1, imm:$I2))]> { @@ -3817,7 +3817,7 @@ class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,  }  class CompareRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator, -                 RegisterOperand cls, Immediate imm> +                 RegisterOperand cls, ImmOpWithPattern imm>    : InstRILa<opcode, (outs), (ins cls:$R1, imm:$I2),               mnemonic#"\t$R1, $I2",               [(set CC, (operator cls:$R1, imm:$I2))]> { @@ -3924,7 +3924,7 @@ class CompareSSb<string mnemonic, bits<8> opcode>  }  class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator, -                SDPatternOperator load, Immediate imm, +                SDPatternOperator load, ImmOpWithPattern imm,                  AddressingMode mode = bdaddr12only>    : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2),             mnemonic#"\t$BD1, $I2", @@ -3934,7 +3934,7 @@ class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,  }  class CompareSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                 SDPatternOperator load, Immediate imm> +                 SDPatternOperator load, ImmOpWithPattern imm>    : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2",              [(set CC, (operator (load bdaddr12only:$BD1), imm:$I2))]> { @@ -3943,7 +3943,7 @@ class CompareSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  class CompareSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                 SDPatternOperator load, Immediate imm, +                 SDPatternOperator load, ImmOpWithPattern imm,                   AddressingMode mode = bdaddr20only>    : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2", @@ -3954,7 +3954,7 @@ class CompareSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,  multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,                           SDPatternOperator operator, SDPatternOperator load, -                         Immediate imm> { +                         ImmOpWithPattern imm> {    let DispKey = mnemonic in {      let DispSize = "12" in        def "" : CompareSI<mnemonic, siOpcode, 
operator, load, imm, bdaddr12pair>; @@ -4012,7 +4012,7 @@ class TestRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  class TestBinarySIL<string mnemonic, bits<16> opcode, -                    SDPatternOperator operator, Immediate imm> +                    SDPatternOperator operator, ImmOpWithPattern imm>    : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2),              mnemonic#"\t$BD1, $I2",              [(set CC, (operator bdaddr12only:$BD1, imm:$I2))]>; @@ -4073,7 +4073,7 @@ class SideEffectTernaryMemMemMemRRFb<string mnemonic, bits<16> opcode,  class SideEffectTernaryRRFc<string mnemonic, bits<16> opcode,                              RegisterOperand cls1, RegisterOperand cls2, -                            Immediate imm> +                            ImmOpWithPattern imm>    : InstRRFc<opcode, (outs), (ins cls1:$R1, cls2:$R2, imm:$M3),               mnemonic#"\t$R1, $R2, $M3", []>; @@ -4086,7 +4086,7 @@ multiclass SideEffectTernaryRRFcOpt<string mnemonic, bits<16> opcode,  class SideEffectTernaryMemMemRRFc<string mnemonic, bits<16> opcode,                                    RegisterOperand cls1, RegisterOperand cls2, -                                  Immediate imm> +                                  ImmOpWithPattern imm>    : InstRRFc<opcode, (outs cls1:$R1, cls2:$R2),               (ins cls1:$R1src, cls2:$R2src, imm:$M3),               mnemonic#"\t$R1, $R2, $M3", []> { @@ -4221,7 +4221,7 @@ class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,  }  class TernaryVRIa<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                  TypedReg tr1, TypedReg tr2, Immediate imm, Immediate index> +                  TypedReg tr1, TypedReg tr2, ImmOpWithPattern imm, ImmOpWithPattern index>    : InstVRIa<opcode, (outs tr1.op:$V1), (ins tr2.op:$V1src, imm:$I2, index:$M3),               mnemonic#"\t$V1, $I2, $M3",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V1src), @@ -4237,7 +4237,7 @@ class TernaryVRId<string mnemonic, bits<16> opcode, SDPatternOperator operator,               mnemonic#"\t$V1, $V2, $V3, $I4",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V2),                                                    (tr2.vt tr2.op:$V3), -                                                  imm32zx8:$I4))]> { +                                                  imm32zx8_timm:$I4))]> {    let M5 = type;  } @@ -4252,8 +4252,8 @@ class TernaryVRRa<string mnemonic, bits<16> opcode, SDPatternOperator operator,               (ins tr2.op:$V2, imm32zx4:$M4, imm32zx4:$M5),               mnemonic#"\t$V1, $V2, $M4, $M5",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V2), -                                                  imm32zx4:$M4, -                                                  imm32zx4:$M5))], +                                                  imm32zx4_timm:$M4, +                                                  imm32zx4_timm:$M5))],               m4or> {    let M3 = type;  } @@ -4285,13 +4285,13 @@ multiclass TernaryOptVRRbSPair<string mnemonic, bits<16> opcode,                                 TypedReg tr1, TypedReg tr2, bits<4> type,                                 bits<4> modifier = 0> {    def "" : TernaryVRRb<mnemonic, opcode, operator, tr1, tr2, type, -                       imm32zx4even, !and (modifier, 14)>; +                       imm32zx4even_timm, !and (modifier, 14)>;    def : InstAlias<mnemonic#"\t$V1, $V2, $V3",                    (!cast<Instruction>(NAME) 
tr1.op:$V1, tr2.op:$V2,                                              tr2.op:$V3, 0)>;    let Defs = [CC] in      def S : TernaryVRRb<mnemonic##"s", opcode, operator_cc, tr1, tr2, type, -                        imm32zx4even, !add(!and (modifier, 14), 1)>; +                        imm32zx4even_timm, !add(!and (modifier, 14), 1)>;    def : InstAlias<mnemonic#"s\t$V1, $V2, $V3",                    (!cast<Instruction>(NAME#"S") tr1.op:$V1, tr2.op:$V2,                                                  tr2.op:$V3, 0)>; @@ -4314,7 +4314,7 @@ class TernaryVRRc<string mnemonic, bits<16> opcode, SDPatternOperator operator,               mnemonic#"\t$V1, $V2, $V3, $M4",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V2),                                                    (tr2.vt tr2.op:$V3), -                                                  imm32zx4:$M4))]> { +                                                  imm32zx4_timm:$M4))]> {    let M5 = 0;    let M6 = 0;  } @@ -4327,7 +4327,7 @@ class TernaryVRRcFloat<string mnemonic, bits<16> opcode,               mnemonic#"\t$V1, $V2, $V3, $M6",               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V2),                                                    (tr2.vt tr2.op:$V3), -                                                  imm32zx4:$M6))]> { +                                                  imm32zx4_timm:$M6))]> {    let M4 = type;    let M5 = m5;  } @@ -4429,7 +4429,7 @@ class TernaryVRSbGeneric<string mnemonic, bits<16> opcode>  }  class TernaryVRV<string mnemonic, bits<16> opcode, bits<5> bytes, -                 Immediate index> +                 ImmOpWithPattern index>    : InstVRV<opcode, (outs VR128:$V1),             (ins VR128:$V1src, bdvaddr12only:$VBD2, index:$M3),             mnemonic#"\t$V1, $VBD2, $M3", []> { @@ -4440,7 +4440,7 @@ class TernaryVRV<string mnemonic, bits<16> opcode, bits<5> bytes,  }  class TernaryVRX<string mnemonic, bits<16> opcode, SDPatternOperator operator, -                 TypedReg tr1, TypedReg tr2, bits<5> bytes, Immediate index> +                 TypedReg tr1, TypedReg tr2, bits<5> bytes, ImmOpWithPattern index>    : InstVRX<opcode, (outs tr1.op:$V1),             (ins tr2.op:$V1src, bdxaddr12only:$XBD2, index:$M3),             mnemonic#"\t$V1, $XBD2, $M3", @@ -4461,7 +4461,7 @@ class QuaternaryVRId<string mnemonic, bits<16> opcode, SDPatternOperator operato               [(set (tr1.vt tr1.op:$V1), (operator (tr2.vt tr2.op:$V1src),                                                    (tr2.vt tr2.op:$V2),                                                    (tr2.vt tr2.op:$V3), -                                                  imm32zx8:$I4))]> { +                                                  imm32zx8_timm:$I4))]> {    let Constraints = "$V1 = $V1src";    let DisableEncoding = "$V1src";    let M5 = type; @@ -4480,7 +4480,7 @@ class QuaternaryVRIf<string mnemonic, bits<16> opcode>    : InstVRIf<opcode, (outs VR128:$V1),               (ins VR128:$V2, VR128:$V3,                    imm32zx8:$I4, imm32zx4:$M5), -             mnemonic#"\t$V1, $V2, $V3, $I4, $M5", []>; +            mnemonic#"\t$V1, $V2, $V3, $I4, $M5", []>;  class QuaternaryVRIg<string mnemonic, bits<16> opcode>    : InstVRIg<opcode, (outs VR128:$V1), @@ -4491,7 +4491,7 @@ class QuaternaryVRIg<string mnemonic, bits<16> opcode>  class QuaternaryVRRd<string mnemonic, bits<16> opcode,                       SDPatternOperator operator, TypedReg tr1, TypedReg tr2,                       TypedReg tr3, TypedReg tr4, bits<4> type, -       
              SDPatternOperator m6mask = imm32zx4, bits<4> m6or = 0> +                     SDPatternOperator m6mask = imm32zx4_timm, bits<4> m6or = 0>    : InstVRRd<opcode, (outs tr1.op:$V1),               (ins tr2.op:$V2, tr3.op:$V3, tr4.op:$V4, m6mask:$M6),               mnemonic#"\t$V1, $V2, $V3, $V4, $M6", @@ -4518,14 +4518,14 @@ multiclass QuaternaryOptVRRdSPair<string mnemonic, bits<16> opcode,                                  bits<4> modifier = 0> {    def "" : QuaternaryVRRd<mnemonic, opcode, operator,                            tr1, tr2, tr2, tr2, type, -                          imm32zx4even, !and (modifier, 14)>; +                          imm32zx4even_timm, !and (modifier, 14)>;    def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $V4",                    (!cast<Instruction>(NAME) tr1.op:$V1, tr2.op:$V2,                                              tr2.op:$V3, tr2.op:$V4, 0)>;    let Defs = [CC] in      def S : QuaternaryVRRd<mnemonic##"s", opcode, operator_cc,                             tr1, tr2, tr2, tr2, type, -                           imm32zx4even, !add (!and (modifier, 14), 1)>; +                           imm32zx4even_timm, !add (!and (modifier, 14), 1)>;    def : InstAlias<mnemonic#"s\t$V1, $V2, $V3, $V4",                    (!cast<Instruction>(NAME#"S") tr1.op:$V1, tr2.op:$V2,                                                  tr2.op:$V3, tr2.op:$V4, 0)>; @@ -4536,7 +4536,7 @@ multiclass QuaternaryOptVRRdSPairGeneric<string mnemonic, bits<16> opcode> {      def "" : QuaternaryVRRdGeneric<mnemonic, opcode>;    def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $V4, $M5",                    (!cast<Instruction>(NAME) VR128:$V1, VR128:$V2, VR128:$V3, -                                            VR128:$V4, imm32zx4:$M5, 0)>; +                                            VR128:$V4, imm32zx4_timm:$M5, 0)>;  }  class SideEffectQuaternaryRRFa<string mnemonic, bits<16> opcode, @@ -4638,13 +4638,13 @@ class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1,  class PrefetchRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator>    : InstRXYb<opcode, (outs), (ins imm32zx4:$M1, bdxaddr20only:$XBD2),               mnemonic##"\t$M1, $XBD2", -             [(operator imm32zx4:$M1, bdxaddr20only:$XBD2)]>; +             [(operator imm32zx4_timm:$M1, bdxaddr20only:$XBD2)]>;  class PrefetchRILPC<string mnemonic, bits<12> opcode,                      SDPatternOperator operator> -  : InstRILc<opcode, (outs), (ins imm32zx4:$M1, pcrel32:$RI2), +  : InstRILc<opcode, (outs), (ins imm32zx4_timm:$M1, pcrel32:$RI2),               mnemonic##"\t$M1, $RI2", -             [(operator imm32zx4:$M1, pcrel32:$RI2)]> { +             [(operator imm32zx4_timm:$M1, pcrel32:$RI2)]> {    // We want PC-relative addresses to be tried ahead of BD and BDX addresses.    // However, BDXs have two extra operands and are therefore 6 units more    // complex. @@ -4691,7 +4691,7 @@ class Pseudo<dag outs, dag ins, list<dag> pattern>  // Like UnaryRI, but expanded after RA depending on the choice of register.  class UnaryRIPseudo<SDPatternOperator operator, RegisterOperand cls, -                    Immediate imm> +                    ImmOpWithPattern imm>    : Pseudo<(outs cls:$R1), (ins imm:$I2),             [(set cls:$R1, (operator imm:$I2))]>; @@ -4720,7 +4720,7 @@ class UnaryRRPseudo<string key, SDPatternOperator operator,  // Like BinaryRI, but expanded after RA depending on the choice of register.  
class BinaryRIPseudo<SDPatternOperator operator, RegisterOperand cls, -                     Immediate imm> +                     ImmOpWithPattern imm>    : Pseudo<(outs cls:$R1), (ins cls:$R1src, imm:$I2),             [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {    let Constraints = "$R1 = $R1src"; @@ -4728,13 +4728,13 @@ class BinaryRIPseudo<SDPatternOperator operator, RegisterOperand cls,  // Like BinaryRIE, but expanded after RA depending on the choice of register.  class BinaryRIEPseudo<SDPatternOperator operator, RegisterOperand cls, -                      Immediate imm> +                      ImmOpWithPattern imm>    : Pseudo<(outs cls:$R1), (ins cls:$R3, imm:$I2),             [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;  // Like BinaryRIAndK, but expanded after RA depending on the choice of register.  multiclass BinaryRIAndKPseudo<string key, SDPatternOperator operator, -                              RegisterOperand cls, Immediate imm> { +                              RegisterOperand cls, ImmOpWithPattern imm> {    let NumOpsKey = key in {      let NumOpsValue = "3" in        def K : BinaryRIEPseudo<operator, cls, imm>, @@ -4764,7 +4764,7 @@ class MemFoldPseudo<string mnemonic, RegisterOperand cls, bits<5> bytes,  // Like CompareRI, but expanded after RA depending on the choice of register.  class CompareRIPseudo<SDPatternOperator operator, RegisterOperand cls, -                      Immediate imm> +                      ImmOpWithPattern imm>    : Pseudo<(outs), (ins cls:$R1, imm:$I2),             [(set CC, (operator cls:$R1, imm:$I2))]> {    let isCompare = 1; @@ -4783,7 +4783,7 @@ class CompareRXYPseudo<SDPatternOperator operator, RegisterOperand cls,  }  // Like TestBinarySIL, but expanded later. -class TestBinarySILPseudo<SDPatternOperator operator, Immediate imm> +class TestBinarySILPseudo<SDPatternOperator operator, ImmOpWithPattern imm>    : Pseudo<(outs), (ins bdaddr12only:$BD1, imm:$I2),             [(set CC, (operator bdaddr12only:$BD1, imm:$I2))]>; @@ -4812,7 +4812,7 @@ class CondBinaryRRFaPseudo<RegisterOperand cls1, RegisterOperand cls2,  // Like CondBinaryRIE, but expanded after RA depending on the choice of  // register. 
-class CondBinaryRIEPseudo<RegisterOperand cls, Immediate imm> +class CondBinaryRIEPseudo<RegisterOperand cls, ImmOpWithPattern imm>    : Pseudo<(outs cls:$R1),             (ins cls:$R1src, imm:$I2, cond4:$valid, cond4:$M3),             [(set cls:$R1, (z_select_ccmask imm:$I2, cls:$R1src, @@ -4876,7 +4876,7 @@ class SelectWrapper<ValueType vt, RegisterOperand cls>    : Pseudo<(outs cls:$dst),             (ins cls:$src1, cls:$src2, imm32zx4:$valid, imm32zx4:$cc),             [(set (vt cls:$dst), (z_select_ccmask cls:$src1, cls:$src2, -                                            imm32zx4:$valid, imm32zx4:$cc))]> { +                                            imm32zx4_timm:$valid, imm32zx4_timm:$cc))]> {    let usesCustomInserter = 1;    let hasNoSchedulingInfo = 1;    let Uses = [CC]; @@ -4890,12 +4890,12 @@ multiclass CondStores<RegisterOperand cls, SDPatternOperator store,      def "" : Pseudo<(outs),                      (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),                      [(store (z_select_ccmask cls:$new, (load mode:$addr), -                                             imm32zx4:$valid, imm32zx4:$cc), +                                             imm32zx4_timm:$valid, imm32zx4_timm:$cc),                              mode:$addr)]>;      def Inv : Pseudo<(outs),                       (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),                       [(store (z_select_ccmask (load mode:$addr), cls:$new, -                                              imm32zx4:$valid, imm32zx4:$cc), +                                              imm32zx4_timm:$valid, imm32zx4_timm:$cc),                                mode:$addr)]>;    }  } @@ -4917,11 +4917,11 @@ class AtomicLoadBinary<SDPatternOperator operator, RegisterOperand cls,  // Specializations of AtomicLoadWBinary.  class AtomicLoadBinaryReg32<SDPatternOperator operator>    : AtomicLoadBinary<operator, GR32, (i32 GR32:$src2), GR32>; -class AtomicLoadBinaryImm32<SDPatternOperator operator, Immediate imm> +class AtomicLoadBinaryImm32<SDPatternOperator operator, ImmOpWithPattern imm>    : AtomicLoadBinary<operator, GR32, (i32 imm:$src2), imm>;  class AtomicLoadBinaryReg64<SDPatternOperator operator>    : AtomicLoadBinary<operator, GR64, (i64 GR64:$src2), GR64>; -class AtomicLoadBinaryImm64<SDPatternOperator operator, Immediate imm> +class AtomicLoadBinaryImm64<SDPatternOperator operator, ImmOpWithPattern imm>    : AtomicLoadBinary<operator, GR64, (i64 imm:$src2), imm>;  // OPERATOR is ATOMIC_SWAPW or an ATOMIC_LOADW_* operation.  PAT and OPERAND @@ -4944,7 +4944,7 @@ class AtomicLoadWBinary<SDPatternOperator operator, dag pat,  // Specializations of AtomicLoadWBinary.  class AtomicLoadWBinaryReg<SDPatternOperator operator>    : AtomicLoadWBinary<operator, (i32 GR32:$src2), GR32>; -class AtomicLoadWBinaryImm<SDPatternOperator operator, Immediate imm> +class AtomicLoadWBinaryImm<SDPatternOperator operator, ImmOpWithPattern imm>    : AtomicLoadWBinary<operator, (i32 imm:$src2), imm>;  // A pseudo instruction that is a direct alias of a real instruction. @@ -4979,7 +4979,7 @@ class StoreAliasVRX<SDPatternOperator operator, TypedReg tr,  // An alias of a BinaryRI, but with different register sizes.  
class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls, -                    Immediate imm> +                    ImmOpWithPattern imm>    : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),            [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {    let Constraints = "$R1 = $R1src"; @@ -4987,7 +4987,7 @@ class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,  // An alias of a BinaryRIL, but with different register sizes.  class BinaryAliasRIL<SDPatternOperator operator, RegisterOperand cls, -                     Immediate imm> +                     ImmOpWithPattern imm>    : Alias<6, (outs cls:$R1), (ins cls:$R1src, imm:$I2),            [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {    let Constraints = "$R1 = $R1src"; @@ -4999,7 +4999,7 @@ class BinaryAliasVRRf<RegisterOperand cls>  // An alias of a CompareRI, but with different register sizes.  class CompareAliasRI<SDPatternOperator operator, RegisterOperand cls, -                     Immediate imm> +                     ImmOpWithPattern imm>    : Alias<4, (outs), (ins cls:$R1, imm:$I2),            [(set CC, (operator cls:$R1, imm:$I2))]> {    let isCompare = 1; diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 57c1cf4ec70a..bc783608d45b 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -46,22 +46,12 @@ using namespace llvm;  #include "SystemZGenInstrInfo.inc"  #define DEBUG_TYPE "systemz-II" -STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");  // Return a mask with Count low bits set.  static uint64_t allOnes(unsigned int Count) {    return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;  } -// Reg should be a 32-bit GPR.  Return true if it is a high register rather -// than a low register. -static bool isHighReg(unsigned int Reg) { -  if (SystemZ::GRH32BitRegClass.contains(Reg)) -    return true; -  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32"); -  return false; -} -  // Pin the vtable to this file.  void SystemZInstrInfo::anchor() {} @@ -85,7 +75,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,    // Set up the two 64-bit registers and remember super reg and its flags.    MachineOperand &HighRegOp = EarlierMI->getOperand(0);    MachineOperand &LowRegOp = MI->getOperand(0); -  unsigned Reg128 = LowRegOp.getReg(); +  Register Reg128 = LowRegOp.getReg();    unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());    unsigned Reg128Undef  = getUndefRegState(LowRegOp.isUndef());    HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64)); @@ -147,8 +137,8 @@ void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {  void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,                                        unsigned HighOpcode,                                        bool ConvertHigh) const { -  unsigned Reg = MI.getOperand(0).getReg(); -  bool IsHigh = isHighReg(Reg); +  Register Reg = MI.getOperand(0).getReg(); +  bool IsHigh = SystemZ::isHighReg(Reg);    MI.setDesc(get(IsHigh ? 
HighOpcode : LowOpcode));    if (IsHigh && ConvertHigh)      MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm())); @@ -161,10 +151,10 @@ void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,  void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,                                         unsigned LowOpcodeK,                                         unsigned HighOpcode) const { -  unsigned DestReg = MI.getOperand(0).getReg(); -  unsigned SrcReg = MI.getOperand(1).getReg(); -  bool DestIsHigh = isHighReg(DestReg); -  bool SrcIsHigh = isHighReg(SrcReg); +  Register DestReg = MI.getOperand(0).getReg(); +  Register SrcReg = MI.getOperand(1).getReg(); +  bool DestIsHigh = SystemZ::isHighReg(DestReg); +  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);    if (!DestIsHigh && !SrcIsHigh)      MI.setDesc(get(LowOpcodeK));    else { @@ -184,9 +174,10 @@ void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,  // is a high GR32.  void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,                                         unsigned HighOpcode) const { -  unsigned Reg = MI.getOperand(0).getReg(); -  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode, -                                       MI.getOperand(2).getImm()); +  Register Reg = MI.getOperand(0).getReg(); +  unsigned Opcode = getOpcodeForOffset( +      SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode, +      MI.getOperand(2).getImm());    MI.setDesc(get(Opcode));  } @@ -195,93 +186,11 @@ void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,  // register is a low GR32 and HighOpcode if the register is a high GR32.  void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,                                         unsigned HighOpcode) const { -  unsigned Reg = MI.getOperand(0).getReg(); -  unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode; +  Register Reg = MI.getOperand(0).getReg(); +  unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;    MI.setDesc(get(Opcode));  } -// MI is a load-register-on-condition pseudo instruction.  Replace it with -// LowOpcode if source and destination are both low GR32s and HighOpcode if -// source and destination are both high GR32s. -void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode, -                                        unsigned HighOpcode) const { -  unsigned DestReg = MI.getOperand(0).getReg(); -  unsigned SrcReg = MI.getOperand(2).getReg(); -  bool DestIsHigh = isHighReg(DestReg); -  bool SrcIsHigh = isHighReg(SrcReg); - -  if (!DestIsHigh && !SrcIsHigh) -    MI.setDesc(get(LowOpcode)); -  else if (DestIsHigh && SrcIsHigh) -    MI.setDesc(get(HighOpcode)); -  else -    LOCRMuxJumps++; - -  // If we were unable to implement the pseudo with a single instruction, we -  // need to convert it back into a branch sequence.  This cannot be done here -  // since the caller of expandPostRAPseudo does not handle changes to the CFG -  // correctly.  This change is defered to the SystemZExpandPseudo pass. -} - -// MI is a select pseudo instruction.  Replace it with LowOpcode if source -// and destination are all low GR32s and HighOpcode if source and destination -// are all high GR32s.  Otherwise, use the two-operand MixedOpcode. 
-void SystemZInstrInfo::expandSELRPseudo(MachineInstr &MI, unsigned LowOpcode, -                                        unsigned HighOpcode, -                                        unsigned MixedOpcode) const { -  unsigned DestReg = MI.getOperand(0).getReg(); -  unsigned Src1Reg = MI.getOperand(1).getReg(); -  unsigned Src2Reg = MI.getOperand(2).getReg(); -  bool DestIsHigh = isHighReg(DestReg); -  bool Src1IsHigh = isHighReg(Src1Reg); -  bool Src2IsHigh = isHighReg(Src2Reg); - -  // If sources and destination aren't all high or all low, we may be able to -  // simplify the operation by moving one of the sources to the destination -  // first.  But only if this doesn't clobber the other source. -  if (DestReg != Src1Reg && DestReg != Src2Reg) { -    if (DestIsHigh != Src1IsHigh) { -      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, Src1Reg, -                    SystemZ::LR, 32, MI.getOperand(1).isKill(), -                    MI.getOperand(1).isUndef()); -      MI.getOperand(1).setReg(DestReg); -      Src1Reg = DestReg; -      Src1IsHigh = DestIsHigh; -    } else if (DestIsHigh != Src2IsHigh) { -      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, Src2Reg, -                    SystemZ::LR, 32, MI.getOperand(2).isKill(), -                    MI.getOperand(2).isUndef()); -      MI.getOperand(2).setReg(DestReg); -      Src2Reg = DestReg; -      Src2IsHigh = DestIsHigh; -    } -  } - -  // If the destination (now) matches one source, prefer this to be first. -  if (DestReg != Src1Reg && DestReg == Src2Reg) { -    commuteInstruction(MI, false, 1, 2); -    std::swap(Src1Reg, Src2Reg); -    std::swap(Src1IsHigh, Src2IsHigh); -  } - -  if (!DestIsHigh && !Src1IsHigh && !Src2IsHigh) -    MI.setDesc(get(LowOpcode)); -  else if (DestIsHigh && Src1IsHigh && Src2IsHigh) -    MI.setDesc(get(HighOpcode)); -  else { -    // Given the simplifcation above, we must already have a two-operand case. -    assert (DestReg == Src1Reg); -    MI.setDesc(get(MixedOpcode)); -    MI.tieOperands(0, 1); -    LOCRMuxJumps++; -  } - -  // If we were unable to implement the pseudo with a single instruction, we -  // need to convert it back into a branch sequence.  This cannot be done here -  // since the caller of expandPostRAPseudo does not handle changes to the CFG -  // correctly.  This change is defered to the SystemZExpandPseudo pass. -} -  // MI is an RR-style pseudo instruction that zero-extends the low Size bits  // of one GRX32 into another.  Replace it with LowOpcode if both operands  // are low registers, otherwise use RISB[LH]G. @@ -302,8 +211,8 @@ void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,  void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {    MachineBasicBlock *MBB = MI->getParent();    MachineFunction &MF = *MBB->getParent(); -  const unsigned Reg64 = MI->getOperand(0).getReg(); -  const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32); +  const Register Reg64 = MI->getOperand(0).getReg(); +  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);    // EAR can only load the low subregister so us a shift for %a0 to produce    // the GR containing %a0 and %a1. 
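The SystemZInstrInfo.cpp hunks above drop the file-static isHighReg() helper (removed near the top of the file together with the LOCRMux statistic and the expandLOCRPseudo/expandSELRPseudo expanders) and rewrite the remaining callers to use a shared SystemZ::isHighReg(), while the register temporaries move from unsigned to Register. A minimal sketch of that shared helper, mirroring the static version deleted in this patch; the header it is actually declared in is not visible here, so the namespace placement and includes below are assumptions:

  // Sketch only: mirrors the file-static isHighReg() removed in this patch.
  // Depends on the generated SystemZ register-class tables, so it is not
  // compilable standalone; where the real declaration lives is not shown
  // in these hunks.
  #include <cassert>

  namespace llvm {
  namespace SystemZ {

  // Return true if Reg is a high 32-bit GPR (GRH32) rather than a low one.
  inline bool isHighReg(unsigned int Reg) {
    if (SystemZ::GRH32BitRegClass.contains(Reg))
      return true;
    assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
    return false;
  }

  } // end namespace SystemZ
  } // end namespace llvm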
@@ -341,8 +250,8 @@ SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,                                  unsigned Size, bool KillSrc,                                  bool UndefSrc) const {    unsigned Opcode; -  bool DestIsHigh = isHighReg(DestReg); -  bool SrcIsHigh = isHighReg(SrcReg); +  bool DestIsHigh = SystemZ::isHighReg(DestReg); +  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);    if (DestIsHigh && SrcIsHigh)      Opcode = SystemZ::RISBHH;    else if (DestIsHigh && !SrcIsHigh) @@ -468,7 +377,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,      // Can't handle indirect branches.      SystemZII::Branch Branch(getBranchInfo(*I)); -    if (!Branch.Target->isMBB()) +    if (!Branch.hasMBBTarget())        return true;      // Punt on compound branches. @@ -478,7 +387,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,      if (Branch.CCMask == SystemZ::CCMASK_ANY) {        // Handle unconditional branches.        if (!AllowModify) { -        TBB = Branch.Target->getMBB(); +        TBB = Branch.getMBBTarget();          continue;        } @@ -490,7 +399,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,        FBB = nullptr;        // Delete the JMP if it's equivalent to a fall-through. -      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) { +      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {          TBB = nullptr;          I->eraseFromParent();          I = MBB.end(); @@ -498,7 +407,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,        }        // TBB is used to indicate the unconditinal destination. -      TBB = Branch.Target->getMBB(); +      TBB = Branch.getMBBTarget();        continue;      } @@ -506,7 +415,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,      if (Cond.empty()) {        // FIXME: add X86-style branch swap        FBB = TBB; -      TBB = Branch.Target->getMBB(); +      TBB = Branch.getMBBTarget();        Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));        Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));        continue; @@ -517,7 +426,7 @@ bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,      // Only handle the case where all conditional branches branch to the same      // destination. -    if (TBB != Branch.Target->getMBB()) +    if (TBB != Branch.getMBBTarget())        return true;      // If the conditions are the same, we can leave them alone. @@ -547,7 +456,7 @@ unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,        continue;      if (!I->isBranch())        break; -    if (!getBranchInfo(*I).Target->isMBB()) +    if (!getBranchInfo(*I).hasMBBTarget())        break;      // Remove the branch.      
I->eraseFromParent(); @@ -676,8 +585,8 @@ void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,      else {        Opc = SystemZ::LOCR;        MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass); -      unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); -      unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); +      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); +      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);        BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);        BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);        TrueReg = TReg; @@ -1258,13 +1167,14 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(        assert(NumOps == 3 && "Expected two source registers.");        Register DstReg = MI.getOperand(0).getReg();        Register DstPhys = -        (TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg); +          (Register::isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);        Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()                                      : ((OpNum == 1 && MI.isCommutable())                                             ? MI.getOperand(2).getReg()                                           : Register()));        if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg && -          TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg)) +          Register::isVirtualRegister(SrcReg) && +          DstPhys == VRM->getPhys(SrcReg))          NeedsCommute = (OpNum == 1);        else          MemOpcode = -1; @@ -1358,15 +1268,6 @@ bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {      expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);      return true; -  case SystemZ::LOCRMux: -    expandLOCRPseudo(MI, SystemZ::LOCR, SystemZ::LOCFHR); -    return true; - -  case SystemZ::SELRMux: -    expandSELRPseudo(MI, SystemZ::SELR, SystemZ::SELFHR, -                         SystemZ::LOCRMux); -    return true; -    case SystemZ::STCMux:      expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);      return true; @@ -1468,8 +1369,8 @@ bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {      return true;    case SystemZ::RISBMux: { -    bool DestIsHigh = isHighReg(MI.getOperand(0).getReg()); -    bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg()); +    bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg()); +    bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());      if (SrcIsHigh == DestIsHigh)        MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));      else { @@ -1545,6 +1446,10 @@ SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {      return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,                               MI.getOperand(2).getImm(), &MI.getOperand(3)); +  case SystemZ::INLINEASM_BR: +    // Don't try to analyze asm goto, so pass nullptr as branch target argument. 
+    return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr); +    default:      llvm_unreachable("Unrecognized branch opcode");    } @@ -1845,8 +1750,7 @@ void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,  bool SystemZInstrInfo::  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, -                                const MachineInstr &MIb, -                                AliasAnalysis *AA) const { +                                const MachineInstr &MIb) const {    if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())      return false; diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h index 2edde175542e..6dc6e72aa52a 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.h +++ b/lib/Target/SystemZ/SystemZInstrInfo.h @@ -100,11 +100,18 @@ enum BranchType {    // An instruction that decrements a 64-bit register and branches if    // the result is nonzero. -  BranchCTG +  BranchCTG, + +  // An instruction representing an asm goto statement. +  AsmGoto  };  // Information about a branch instruction. -struct Branch { +class Branch { +  // The target of the branch. In case of INLINEASM_BR, this is nullptr. +  const MachineOperand *Target; + +public:    // The type of the branch.    BranchType Type; @@ -114,12 +121,15 @@ struct Branch {    // CCMASK_<N> is set if the branch should be taken when CC == N.    unsigned CCMask; -  // The target of the branch. -  const MachineOperand *Target; -    Branch(BranchType type, unsigned ccValid, unsigned ccMask,           const MachineOperand *target) -    : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {} +    : Target(target), Type(type), CCValid(ccValid), CCMask(ccMask) {} + +  bool isIndirect() { return Target != nullptr && Target->isReg(); } +  bool hasMBBTarget() { return Target != nullptr && Target->isMBB(); } +  MachineBasicBlock *getMBBTarget() { +    return hasMBBTarget() ? Target->getMBB() : nullptr; +  }  };  // Kinds of fused compares in compare-and-* instructions.  Together with type @@ -160,10 +170,6 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {                         unsigned HighOpcode) const;    void expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,                         unsigned HighOpcode) const; -  void expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode, -                        unsigned HighOpcode) const; -  void expandSELRPseudo(MachineInstr &MI, unsigned LowOpcode, -                        unsigned HighOpcode, unsigned MixedOpcode) const;    void expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,                          unsigned Size) const;    void expandLoadStackGuard(MachineInstr *MI) const; @@ -322,8 +328,7 @@ public:    // memory addresses and false otherwise.    
bool    areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, -                                  const MachineInstr &MIb, -                                  AliasAnalysis *AA = nullptr) const override; +                                  const MachineInstr &MIb) const override;  };  } // end namespace llvm diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td index 91856893e3bd..8b334756611a 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.td +++ b/lib/Target/SystemZ/SystemZInstrInfo.td @@ -337,15 +337,15 @@ defm CondStore8Mux  : CondStores<GRX32, nonvolatile_truncstorei8,  defm CondStore16Mux : CondStores<GRX32, nonvolatile_truncstorei16,                                   nonvolatile_anyextloadi16, bdxaddr20only>,                        Requires<[FeatureHighWord]>; -defm CondStore32Mux : CondStores<GRX32, nonvolatile_store, -                                 nonvolatile_load, bdxaddr20only>, +defm CondStore32Mux : CondStores<GRX32, simple_store, +                                 simple_load, bdxaddr20only>,                        Requires<[FeatureLoadStoreOnCond2]>;  defm CondStore8     : CondStores<GR32, nonvolatile_truncstorei8,                                   nonvolatile_anyextloadi8, bdxaddr20only>;  defm CondStore16    : CondStores<GR32, nonvolatile_truncstorei16,                                   nonvolatile_anyextloadi16, bdxaddr20only>; -defm CondStore32    : CondStores<GR32, nonvolatile_store, -                                 nonvolatile_load, bdxaddr20only>; +defm CondStore32    : CondStores<GR32, simple_store, +                                 simple_load, bdxaddr20only>;  defm : CondStores64<CondStore8, CondStore8Inv, nonvolatile_truncstorei8,                      nonvolatile_anyextloadi8, bdxaddr20only>; @@ -353,8 +353,8 @@ defm : CondStores64<CondStore16, CondStore16Inv, nonvolatile_truncstorei16,                      nonvolatile_anyextloadi16, bdxaddr20only>;  defm : CondStores64<CondStore32, CondStore32Inv, nonvolatile_truncstorei32,                      nonvolatile_anyextloadi32, bdxaddr20only>; -defm CondStore64 : CondStores<GR64, nonvolatile_store, -                              nonvolatile_load, bdxaddr20only>; +defm CondStore64 : CondStores<GR64, simple_store, +                              simple_load, bdxaddr20only>;  //===----------------------------------------------------------------------===//  // Move instructions @@ -531,8 +531,8 @@ let Predicates = [FeatureLoadStoreOnCond2], Uses = [CC] in {    // Load on condition.  Matched via DAG pattern.    // Expands to LOC or LOCFH, depending on the choice of register. -  def LOCMux : CondUnaryRSYPseudo<nonvolatile_load, GRX32, 4>; -  defm LOCFH : CondUnaryRSYPair<"locfh", 0xEBE0, nonvolatile_load, GRH32, 4>; +  def LOCMux : CondUnaryRSYPseudo<simple_load, GRX32, 4>; +  defm LOCFH : CondUnaryRSYPair<"locfh", 0xEBE0, simple_load, GRH32, 4>;    // Store on condition.  Expanded from CondStore* pseudos.    // Expands to STOC or STOCFH, depending on the choice of register. @@ -563,8 +563,8 @@ let Predicates = [FeatureLoadStoreOnCond], Uses = [CC] in {    }    // Load on condition.  Matched via DAG pattern. -  defm LOC  : CondUnaryRSYPair<"loc",  0xEBF2, nonvolatile_load, GR32, 4>; -  defm LOCG : CondUnaryRSYPair<"locg", 0xEBE2, nonvolatile_load, GR64, 8>; +  defm LOC  : CondUnaryRSYPair<"loc",  0xEBF2, simple_load, GR32, 4>; +  defm LOCG : CondUnaryRSYPair<"locg", 0xEBE2, simple_load, GR64, 8>;    // Store on condition.  Expanded from CondStore* pseudos.    
defm STOC  : CondStoreRSYPair<"stoc",  0xEBF3, GR32, 4>; @@ -2082,7 +2082,7 @@ let Predicates = [FeatureProcessorAssist] in {  // cleared.  We only use the first result here.  let Defs = [CC] in    def FLOGR : UnaryRRE<"flogr", 0xB983, null_frag, GR128, GR64>; -def : Pat<(ctlz GR64:$src), +def : Pat<(i64 (ctlz GR64:$src)),            (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_h64)>;  // Population count.  Counts bits set per byte or doubleword. diff --git a/lib/Target/SystemZ/SystemZInstrVector.td b/lib/Target/SystemZ/SystemZInstrVector.td index 261727f89058..02364bbda5c1 100644 --- a/lib/Target/SystemZ/SystemZInstrVector.td +++ b/lib/Target/SystemZ/SystemZInstrVector.td @@ -60,7 +60,7 @@ let Predicates = [FeatureVector] in {      // Generate byte mask.      def VZERO : InherentVRIa<"vzero", 0xE744, 0>;      def VONE  : InherentVRIa<"vone", 0xE744, 0xffff>; -    def VGBM  : UnaryVRIa<"vgbm", 0xE744, z_byte_mask, v128b, imm32zx16>; +    def VGBM  : UnaryVRIa<"vgbm", 0xE744, z_byte_mask, v128b, imm32zx16_timm>;      // Generate mask.      def VGM  : BinaryVRIbGeneric<"vgm", 0xE746>; @@ -71,10 +71,10 @@ let Predicates = [FeatureVector] in {      // Replicate immediate.      def VREPI  : UnaryVRIaGeneric<"vrepi", 0xE745, imm32sx16>; -    def VREPIB : UnaryVRIa<"vrepib", 0xE745, z_replicate, v128b, imm32sx16, 0>; -    def VREPIH : UnaryVRIa<"vrepih", 0xE745, z_replicate, v128h, imm32sx16, 1>; -    def VREPIF : UnaryVRIa<"vrepif", 0xE745, z_replicate, v128f, imm32sx16, 2>; -    def VREPIG : UnaryVRIa<"vrepig", 0xE745, z_replicate, v128g, imm32sx16, 3>; +    def VREPIB : UnaryVRIa<"vrepib", 0xE745, z_replicate, v128b, imm32sx16_timm, 0>; +    def VREPIH : UnaryVRIa<"vrepih", 0xE745, z_replicate, v128h, imm32sx16_timm, 1>; +    def VREPIF : UnaryVRIa<"vrepif", 0xE745, z_replicate, v128f, imm32sx16_timm, 2>; +    def VREPIG : UnaryVRIa<"vrepig", 0xE745, z_replicate, v128g, imm32sx16_timm, 3>;    }    // Load element immediate. @@ -116,7 +116,7 @@ let Predicates = [FeatureVector] in {                                 (ins bdxaddr12only:$XBD2, imm32zx4:$M3),                         "lcbb\t$R1, $XBD2, $M3",                         [(set GR32:$R1, (int_s390_lcbb bdxaddr12only:$XBD2, -                                                      imm32zx4:$M3))]>; +                                                      imm32zx4_timm:$M3))]>;    // Load with length.  The number of loaded bytes is only known at run time.    def VLL : BinaryVRSb<"vll", 0xE737, int_s390_vll, 0>; @@ -362,9 +362,9 @@ let Predicates = [FeatureVector] in {    def VREPH : BinaryVRIc<"vreph", 0xE74D, z_splat, v128h, v128h, 1>;    def VREPF : BinaryVRIc<"vrepf", 0xE74D, z_splat, v128f, v128f, 2>;    def VREPG : BinaryVRIc<"vrepg", 0xE74D, z_splat, v128g, v128g, 3>; -  def : Pat<(v4f32 (z_splat VR128:$vec, imm32zx16:$index)), +  def : Pat<(v4f32 (z_splat VR128:$vec, imm32zx16_timm:$index)),              (VREPF VR128:$vec, imm32zx16:$index)>; -  def : Pat<(v2f64 (z_splat VR128:$vec, imm32zx16:$index)), +  def : Pat<(v2f64 (z_splat VR128:$vec, imm32zx16_timm:$index)),              (VREPG VR128:$vec, imm32zx16:$index)>;    // Select. @@ -778,7 +778,7 @@ let Predicates = [FeatureVector] in {    // Shift left double by byte.    def VSLDB : TernaryVRId<"vsldb", 0xE777, z_shl_double, v128b, v128b, 0>; -  def : Pat<(int_s390_vsldb VR128:$x, VR128:$y, imm32zx8:$z), +  def : Pat<(int_s390_vsldb VR128:$x, VR128:$y, imm32zx8_timm:$z),              (VSLDB VR128:$x, VR128:$y, imm32zx8:$z)>;    // Shift left double by bit. 
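In the SystemZInstrVector.td hunks above, selection patterns that consume a real instruction immediate (VGBM, the VREPI* replicates, LCBB, the splat and VSLDB patterns) switch from the plain operands such as imm32zx4 and imm32sx16 to their _timm counterparts, and the conditional load/store patterns in SystemZInstrInfo.td move from nonvolatile_load/nonvolatile_store to simple_load/simple_store. The _timm forms match ISD::TargetConstant nodes, i.e. values that must stay immediates in the selected instruction. A short generic sketch of that distinction at the SelectionDAG level; this is an illustration, not code from this patch, and buildMaskOperand is a made-up name:

  // Illustration only: how an operand ends up as a 'timm' vs. an 'imm'.
  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  static SDValue buildMaskOperand(SelectionDAG &DAG, const SDLoc &DL,
                                  uint64_t Mask) {
    // ISD::TargetConstant: matched by timm/_timm patterns and required to
    // remain an immediate operand of the final MachineInstr.
    SDValue AsTImm = DAG.getTargetConstant(Mask, DL, MVT::i32);
    // ISD::Constant: matched by plain imm patterns and free to be
    // materialized into a register during legalization or selection.
    SDValue AsImm = DAG.getConstant(Mask, DL, MVT::i32);
    (void)AsImm;
    return AsTImm;
  }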
@@ -1069,7 +1069,7 @@ let Predicates = [FeatureVector] in {      def WCGDB : TernaryVRRa<"wcgdb", 0xE7C2, null_frag, v64g, v64db, 3, 8>;    }    // Rounding mode should agree with SystemZInstrFP.td. -  def : FPConversion<VCGDB, fp_to_sint, v128g, v128db, 0, 5>; +  def : FPConversion<VCGDB, any_fp_to_sint, v128g, v128db, 0, 5>;    let Predicates = [FeatureVectorEnhancements2] in {      let Uses = [FPC], mayRaiseFPException = 1 in {        let isAsmParserOnly = 1 in @@ -1078,7 +1078,7 @@ let Predicates = [FeatureVector] in {        def WCFEB : TernaryVRRa<"wcfeb", 0xE7C2, null_frag, v32sb, v32f, 2, 8>;      }      // Rounding mode should agree with SystemZInstrFP.td. -    def : FPConversion<VCFEB, fp_to_sint, v128f, v128sb, 0, 5>; +    def : FPConversion<VCFEB, any_fp_to_sint, v128f, v128sb, 0, 5>;    }    // Convert to logical. @@ -1088,7 +1088,7 @@ let Predicates = [FeatureVector] in {      def WCLGDB : TernaryVRRa<"wclgdb", 0xE7C0, null_frag, v64g, v64db, 3, 8>;    }    // Rounding mode should agree with SystemZInstrFP.td. -  def : FPConversion<VCLGDB, fp_to_uint, v128g, v128db, 0, 5>; +  def : FPConversion<VCLGDB, any_fp_to_uint, v128g, v128db, 0, 5>;    let Predicates = [FeatureVectorEnhancements2] in {      let Uses = [FPC], mayRaiseFPException = 1 in {        let isAsmParserOnly = 1 in @@ -1097,7 +1097,7 @@ let Predicates = [FeatureVector] in {        def WCLFEB : TernaryVRRa<"wclfeb", 0xE7C0, null_frag, v32sb, v32f, 2, 8>;      }      // Rounding mode should agree with SystemZInstrFP.td. -    def : FPConversion<VCLFEB, fp_to_uint, v128f, v128sb, 0, 5>; +    def : FPConversion<VCLFEB, any_fp_to_uint, v128f, v128sb, 0, 5>;    }    // Divide. diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp index 95d7e22dec32..724111229569 100644 --- a/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -85,9 +85,9 @@ struct MBBInfo {    // This value never changes.    uint64_t Size = 0; -  // The minimum alignment of the block, as a log2 value. +  // The minimum alignment of the block.    // This value never changes. -  unsigned Alignment = 0; +  Align Alignment;    // The number of terminators in this block.  This value never changes.    unsigned NumTerminators = 0; @@ -127,7 +127,8 @@ struct BlockPosition {    // as the runtime address.    unsigned KnownBits; -  BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {} +  BlockPosition(unsigned InitialLogAlignment) +      : KnownBits(InitialLogAlignment) {}  };  class SystemZLongBranch : public MachineFunctionPass { @@ -178,17 +179,16 @@ const uint64_t MaxForwardRange = 0xfffe;  // instructions.  void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,                                             MBBInfo &Block) { -  if (Block.Alignment > Position.KnownBits) { +  if (Log2(Block.Alignment) > Position.KnownBits) {      // When calculating the address of Block, we need to conservatively      // assume that Block had the worst possible misalignment. -    Position.Address += ((uint64_t(1) << Block.Alignment) - -                         (uint64_t(1) << Position.KnownBits)); -    Position.KnownBits = Block.Alignment; +    Position.Address += +        (Block.Alignment.value() - (uint64_t(1) << Position.KnownBits)); +    Position.KnownBits = Log2(Block.Alignment);    }    // Align the addresses. 
-  uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1; -  Position.Address = (Position.Address + AlignMask) & ~AlignMask; +  Position.Address = alignTo(Position.Address, Block.Alignment);    // Record the block's position.    Block.Address = Position.Address; @@ -257,7 +257,7 @@ TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr &MI) {      }      Terminator.Branch = &MI;      Terminator.TargetBlock = -      TII->getBranchInfo(MI).Target->getMBB()->getNumber(); +      TII->getBranchInfo(MI).getMBBTarget()->getNumber();    }    return Terminator;  } @@ -275,7 +275,7 @@ uint64_t SystemZLongBranch::initMBBInfo() {    Terminators.clear();    Terminators.reserve(NumBlocks); -  BlockPosition Position(MF->getAlignment()); +  BlockPosition Position(Log2(MF->getAlignment()));    for (unsigned I = 0; I < NumBlocks; ++I) {      MachineBasicBlock *MBB = MF->getBlockNumbered(I);      MBBInfo &Block = MBBs[I]; @@ -339,7 +339,7 @@ bool SystemZLongBranch::mustRelaxABranch() {  // must be long.  void SystemZLongBranch::setWorstCaseAddresses() {    SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin(); -  BlockPosition Position(MF->getAlignment()); +  BlockPosition Position(Log2(MF->getAlignment()));    for (auto &Block : MBBs) {      skipNonTerminators(Position, Block);      for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { @@ -440,7 +440,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {  // Run a shortening pass and relax any branches that need to be relaxed.  void SystemZLongBranch::relaxBranches() {    SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin(); -  BlockPosition Position(MF->getAlignment()); +  BlockPosition Position(Log2(MF->getAlignment()));    for (auto &Block : MBBs) {      skipNonTerminators(Position, Block);      for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { diff --git a/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/lib/Target/SystemZ/SystemZMachineScheduler.cpp index 0becfaa1d49c..3fc25034dded 100644 --- a/lib/Target/SystemZ/SystemZMachineScheduler.cpp +++ b/lib/Target/SystemZ/SystemZMachineScheduler.cpp @@ -15,6 +15,7 @@  //===----------------------------------------------------------------------===//  #include "SystemZMachineScheduler.h" +#include "llvm/CodeGen/MachineLoopInfo.h"  using namespace llvm; @@ -108,8 +109,8 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) {         I != SinglePredMBB->end(); I++) {      LLVM_DEBUG(dbgs() << "** Emitting incoming branch: "; I->dump(););      bool TakenBranch = (I->isBranch() && -      (TII->getBranchInfo(*I).Target->isReg() || // Relative branch -       TII->getBranchInfo(*I).Target->getMBB() == MBB)); +                        (TII->getBranchInfo(*I).isIndirect() || +                         TII->getBranchInfo(*I).getMBBTarget() == MBB));      HazardRec->emitInstruction(&*I, TakenBranch);      if (TakenBranch)        break; diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td index 56632e1529a2..b2bab68a6274 100644 --- a/lib/Target/SystemZ/SystemZOperands.td +++ b/lib/Target/SystemZ/SystemZOperands.td @@ -21,15 +21,32 @@ class ImmediateTLSAsmOperand<string name>    let RenderMethod = "addImmTLSOperands";  } +class ImmediateOp<ValueType vt, string asmop> : Operand<vt> { +  let PrintMethod = "print"##asmop##"Operand"; +  let DecoderMethod = "decode"##asmop##"Operand"; +  let ParserMatchClass = !cast<AsmOperandClass>(asmop); +} + +class ImmOpWithPattern<ValueType 
vt, string asmop, code pred, SDNodeXForm xform, +      SDNode ImmNode = imm> : +  ImmediateOp<vt, asmop>, PatLeaf<(vt ImmNode), pred, xform>; + +// class ImmediatePatLeaf<ValueType vt, code pred, +//       SDNodeXForm xform, SDNode ImmNode> +//   : PatLeaf<(vt ImmNode), pred, xform>; + +  // Constructs both a DAG pattern and instruction operand for an immediate  // of type VT.  PRED returns true if a node is acceptable and XFORM returns  // the operand value associated with the node.  ASMOP is the name of the  // associated asm operand, and also forms the basis of the asm print method. -class Immediate<ValueType vt, code pred, SDNodeXForm xform, string asmop> -  : PatLeaf<(vt imm), pred, xform>, Operand<vt> { -  let PrintMethod = "print"##asmop##"Operand"; -  let DecoderMethod = "decode"##asmop##"Operand"; -  let ParserMatchClass = !cast<AsmOperandClass>(asmop); +multiclass Immediate<ValueType vt, code pred, SDNodeXForm xform, string asmop> { +  // def "" : ImmediateOp<vt, asmop>, +  //          PatLeaf<(vt imm), pred, xform>; +  def "" : ImmOpWithPattern<vt, asmop, pred, xform>; + +//  def _timm : PatLeaf<(vt timm), pred, xform>; +  def _timm : ImmOpWithPattern<vt, asmop, pred, xform, timm>;  }  // Constructs an asm operand for a PC-relative address.  SIZE says how @@ -295,87 +312,87 @@ def U48Imm : ImmediateAsmOperand<"U48Imm">;  // Immediates for the lower and upper 16 bits of an i32, with the other  // bits of the i32 being zero. -def imm32ll16 : Immediate<i32, [{ +defm imm32ll16 : Immediate<i32, [{    return SystemZ::isImmLL(N->getZExtValue());  }], LL16, "U16Imm">; -def imm32lh16 : Immediate<i32, [{ +defm imm32lh16 : Immediate<i32, [{    return SystemZ::isImmLH(N->getZExtValue());  }], LH16, "U16Imm">;  // Immediates for the lower and upper 16 bits of an i32, with the other  // bits of the i32 being one. -def imm32ll16c : Immediate<i32, [{ +defm imm32ll16c : Immediate<i32, [{    return SystemZ::isImmLL(uint32_t(~N->getZExtValue()));  }], LL16, "U16Imm">; -def imm32lh16c : Immediate<i32, [{ +defm imm32lh16c : Immediate<i32, [{    return SystemZ::isImmLH(uint32_t(~N->getZExtValue()));  }], LH16, "U16Imm">;  // Short immediates -def imm32zx1 : Immediate<i32, [{ +defm imm32zx1 : Immediate<i32, [{    return isUInt<1>(N->getZExtValue());  }], NOOP_SDNodeXForm, "U1Imm">; -def imm32zx2 : Immediate<i32, [{ +defm imm32zx2 : Immediate<i32, [{    return isUInt<2>(N->getZExtValue());  }], NOOP_SDNodeXForm, "U2Imm">; -def imm32zx3 : Immediate<i32, [{ +defm imm32zx3 : Immediate<i32, [{    return isUInt<3>(N->getZExtValue());  }], NOOP_SDNodeXForm, "U3Imm">; -def imm32zx4 : Immediate<i32, [{ +defm imm32zx4 : Immediate<i32, [{    return isUInt<4>(N->getZExtValue());  }], NOOP_SDNodeXForm, "U4Imm">;  // Note: this enforces an even value during code generation only.  // When used from the assembler, any 4-bit value is allowed. 
-def imm32zx4even : Immediate<i32, [{ +defm imm32zx4even : Immediate<i32, [{    return isUInt<4>(N->getZExtValue());  }], UIMM8EVEN, "U4Imm">; -def imm32zx6 : Immediate<i32, [{ +defm imm32zx6 : Immediate<i32, [{    return isUInt<6>(N->getZExtValue());  }], NOOP_SDNodeXForm, "U6Imm">; -def imm32sx8 : Immediate<i32, [{ +defm imm32sx8 : Immediate<i32, [{    return isInt<8>(N->getSExtValue());  }], SIMM8, "S8Imm">; -def imm32zx8 : Immediate<i32, [{ +defm imm32zx8 : Immediate<i32, [{    return isUInt<8>(N->getZExtValue());  }], UIMM8, "U8Imm">; -def imm32zx8trunc : Immediate<i32, [{}], UIMM8, "U8Imm">; +defm imm32zx8trunc : Immediate<i32, [{}], UIMM8, "U8Imm">; -def imm32zx12 : Immediate<i32, [{ +defm imm32zx12 : Immediate<i32, [{    return isUInt<12>(N->getZExtValue());  }], UIMM12, "U12Imm">; -def imm32sx16 : Immediate<i32, [{ +defm imm32sx16 : Immediate<i32, [{    return isInt<16>(N->getSExtValue());  }], SIMM16, "S16Imm">; -def imm32sx16n : Immediate<i32, [{ +defm imm32sx16n : Immediate<i32, [{    return isInt<16>(-N->getSExtValue());  }], NEGSIMM16, "S16Imm">; -def imm32zx16 : Immediate<i32, [{ +defm imm32zx16 : Immediate<i32, [{    return isUInt<16>(N->getZExtValue());  }], UIMM16, "U16Imm">; -def imm32sx16trunc : Immediate<i32, [{}], SIMM16, "S16Imm">; -def imm32zx16trunc : Immediate<i32, [{}], UIMM16, "U16Imm">; +defm imm32sx16trunc : Immediate<i32, [{}], SIMM16, "S16Imm">; +defm imm32zx16trunc : Immediate<i32, [{}], UIMM16, "U16Imm">;  // Full 32-bit immediates.  we need both signed and unsigned versions  // because the assembler is picky.  E.g. AFI requires signed operands  // while NILF requires unsigned ones. -def simm32 : Immediate<i32, [{}], SIMM32, "S32Imm">; -def uimm32 : Immediate<i32, [{}], UIMM32, "U32Imm">; +defm simm32 : Immediate<i32, [{}], SIMM32, "S32Imm">; +defm uimm32 : Immediate<i32, [{}], UIMM32, "U32Imm">; -def simm32n : Immediate<i32, [{ +defm simm32n : Immediate<i32, [{    return isInt<32>(-N->getSExtValue());  }], NEGSIMM32, "S32Imm">; @@ -387,107 +404,107 @@ def imm32 : ImmLeaf<i32, [{}]>;  // Immediates for 16-bit chunks of an i64, with the other bits of the  // i32 being zero. -def imm64ll16 : Immediate<i64, [{ +defm imm64ll16 : Immediate<i64, [{    return SystemZ::isImmLL(N->getZExtValue());  }], LL16, "U16Imm">; -def imm64lh16 : Immediate<i64, [{ +defm imm64lh16 : Immediate<i64, [{    return SystemZ::isImmLH(N->getZExtValue());  }], LH16, "U16Imm">; -def imm64hl16 : Immediate<i64, [{ +defm imm64hl16 : Immediate<i64, [{    return SystemZ::isImmHL(N->getZExtValue());  }], HL16, "U16Imm">; -def imm64hh16 : Immediate<i64, [{ +defm imm64hh16 : Immediate<i64, [{    return SystemZ::isImmHH(N->getZExtValue());  }], HH16, "U16Imm">;  // Immediates for 16-bit chunks of an i64, with the other bits of the  // i32 being one. -def imm64ll16c : Immediate<i64, [{ +defm imm64ll16c : Immediate<i64, [{    return SystemZ::isImmLL(uint64_t(~N->getZExtValue()));  }], LL16, "U16Imm">; -def imm64lh16c : Immediate<i64, [{ +defm imm64lh16c : Immediate<i64, [{    return SystemZ::isImmLH(uint64_t(~N->getZExtValue()));  }], LH16, "U16Imm">; -def imm64hl16c : Immediate<i64, [{ +defm imm64hl16c : Immediate<i64, [{    return SystemZ::isImmHL(uint64_t(~N->getZExtValue()));  }], HL16, "U16Imm">; -def imm64hh16c : Immediate<i64, [{ +defm imm64hh16c : Immediate<i64, [{    return SystemZ::isImmHH(uint64_t(~N->getZExtValue()));  }], HH16, "U16Imm">;  // Immediates for the lower and upper 32 bits of an i64, with the other  // bits of the i32 being zero. 
-def imm64lf32 : Immediate<i64, [{ +defm imm64lf32 : Immediate<i64, [{    return SystemZ::isImmLF(N->getZExtValue());  }], LF32, "U32Imm">; -def imm64hf32 : Immediate<i64, [{ +defm imm64hf32 : Immediate<i64, [{    return SystemZ::isImmHF(N->getZExtValue());  }], HF32, "U32Imm">;  // Immediates for the lower and upper 32 bits of an i64, with the other  // bits of the i32 being one. -def imm64lf32c : Immediate<i64, [{ +defm imm64lf32c : Immediate<i64, [{    return SystemZ::isImmLF(uint64_t(~N->getZExtValue()));  }], LF32, "U32Imm">; -def imm64hf32c : Immediate<i64, [{ +defm imm64hf32c : Immediate<i64, [{    return SystemZ::isImmHF(uint64_t(~N->getZExtValue()));  }], HF32, "U32Imm">;  // Negated immediates that fit LF32 or LH16. -def imm64lh16n : Immediate<i64, [{ +defm imm64lh16n : Immediate<i64, [{    return SystemZ::isImmLH(uint64_t(-N->getZExtValue()));  }], NEGLH16, "U16Imm">; -def imm64lf32n : Immediate<i64, [{ +defm imm64lf32n : Immediate<i64, [{    return SystemZ::isImmLF(uint64_t(-N->getZExtValue()));  }], NEGLF32, "U32Imm">;  // Short immediates. -def imm64sx8 : Immediate<i64, [{ +defm imm64sx8 : Immediate<i64, [{    return isInt<8>(N->getSExtValue());  }], SIMM8, "S8Imm">; -def imm64zx8 : Immediate<i64, [{ +defm imm64zx8 : Immediate<i64, [{    return isUInt<8>(N->getSExtValue());  }], UIMM8, "U8Imm">; -def imm64sx16 : Immediate<i64, [{ +defm imm64sx16 : Immediate<i64, [{    return isInt<16>(N->getSExtValue());  }], SIMM16, "S16Imm">; -def imm64sx16n : Immediate<i64, [{ +defm imm64sx16n : Immediate<i64, [{    return isInt<16>(-N->getSExtValue());  }], NEGSIMM16, "S16Imm">; -def imm64zx16 : Immediate<i64, [{ +defm imm64zx16 : Immediate<i64, [{    return isUInt<16>(N->getZExtValue());  }], UIMM16, "U16Imm">; -def imm64sx32 : Immediate<i64, [{ +defm imm64sx32 : Immediate<i64, [{    return isInt<32>(N->getSExtValue());  }], SIMM32, "S32Imm">; -def imm64sx32n : Immediate<i64, [{ +defm imm64sx32n : Immediate<i64, [{    return isInt<32>(-N->getSExtValue());  }], NEGSIMM32, "S32Imm">; -def imm64zx32 : Immediate<i64, [{ +defm imm64zx32 : Immediate<i64, [{    return isUInt<32>(N->getZExtValue());  }], UIMM32, "U32Imm">; -def imm64zx32n : Immediate<i64, [{ +defm imm64zx32n : Immediate<i64, [{    return isUInt<32>(-N->getSExtValue());  }], NEGUIMM32, "U32Imm">; -def imm64zx48 : Immediate<i64, [{ +defm imm64zx48 : Immediate<i64, [{    return isUInt<64>(N->getZExtValue());  }], UIMM48, "U48Imm">; @@ -637,7 +654,7 @@ def bdvaddr12only     : BDVMode<            "64", "12">;  //===----------------------------------------------------------------------===//  // A 4-bit condition-code mask. -def cond4 : PatLeaf<(i32 imm), [{ return (N->getZExtValue() < 16); }]>, +def cond4 : PatLeaf<(i32 timm), [{ return (N->getZExtValue() < 16); }]>,              Operand<i32> {    let PrintMethod = "printCond4Operand";  } diff --git a/lib/Target/SystemZ/SystemZOperators.td b/lib/Target/SystemZ/SystemZOperators.td index 15bd12bc98a4..6fe383e64b74 100644 --- a/lib/Target/SystemZ/SystemZOperators.td +++ b/lib/Target/SystemZ/SystemZOperators.td @@ -472,17 +472,17 @@ def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs),                                (z_subcarry_1 node:$lhs, node:$rhs, CC)>;  // Signed and unsigned comparisons. 
-def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{ +def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{    unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();    return Type != SystemZICMP::UnsignedOnly;  }]>; -def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{ +def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{    unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();    return Type != SystemZICMP::SignedOnly;  }]>;  // Register- and memory-based TEST UNDER MASK. -def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, imm)>; +def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, timm)>;  def z_tm_mem : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, 0)>;  // Register sign-extend operations.  Sub-32-bit values are represented as i32s. diff --git a/lib/Target/SystemZ/SystemZPatterns.td b/lib/Target/SystemZ/SystemZPatterns.td index beaf4de285a3..65300fb47627 100644 --- a/lib/Target/SystemZ/SystemZPatterns.td +++ b/lib/Target/SystemZ/SystemZPatterns.td @@ -100,12 +100,12 @@ multiclass CondStores64<Instruction insn, Instruction insninv,                          SDPatternOperator store, SDPatternOperator load,                          AddressingMode mode> {    def : Pat<(store (z_select_ccmask GR64:$new, (load mode:$addr), -                                    imm32zx4:$valid, imm32zx4:$cc), +                                    imm32zx4_timm:$valid, imm32zx4_timm:$cc),                     mode:$addr),              (insn (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,                    imm32zx4:$valid, imm32zx4:$cc)>;    def : Pat<(store (z_select_ccmask (load mode:$addr), GR64:$new, -                                    imm32zx4:$valid, imm32zx4:$cc), +                                    imm32zx4_timm:$valid, imm32zx4_timm:$cc),                     mode:$addr),              (insninv (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,                       imm32zx4:$valid, imm32zx4:$cc)>; diff --git a/lib/Target/SystemZ/SystemZPostRewrite.cpp b/lib/Target/SystemZ/SystemZPostRewrite.cpp index 8e4060eac74c..aaa7f8fc88f5 100644 --- a/lib/Target/SystemZ/SystemZPostRewrite.cpp +++ b/lib/Target/SystemZ/SystemZPostRewrite.cpp @@ -25,6 +25,7 @@ using namespace llvm;  #define DEBUG_TYPE "systemz-postrewrite"  STATISTIC(MemFoldCopies, "Number of copies inserted before folded mem ops."); +STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");  namespace llvm {    void initializeSystemZPostRewritePass(PassRegistry&); @@ -45,12 +46,20 @@ public:    StringRef getPassName() const override { return SYSTEMZ_POSTREWRITE_NAME; } -  void getAnalysisUsage(AnalysisUsage &AU) const override { -    AU.setPreservesAll(); -    MachineFunctionPass::getAnalysisUsage(AU); -  } -  private: +  void selectLOCRMux(MachineBasicBlock &MBB, +                     MachineBasicBlock::iterator MBBI, +                     MachineBasicBlock::iterator &NextMBBI, +                     unsigned LowOpcode, +                     unsigned HighOpcode); +  void selectSELRMux(MachineBasicBlock &MBB, +                     MachineBasicBlock::iterator MBBI, +                     MachineBasicBlock::iterator &NextMBBI, +                     unsigned LowOpcode, +                     unsigned HighOpcode); +  bool expandCondMove(MachineBasicBlock &MBB, +                      MachineBasicBlock::iterator MBBI, +                      
MachineBasicBlock::iterator &NextMBBI);    bool selectMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,                  MachineBasicBlock::iterator &NextMBBI);    bool selectMBB(MachineBasicBlock &MBB); @@ -68,11 +77,141 @@ FunctionPass *llvm::createSystemZPostRewritePass(SystemZTargetMachine &TM) {    return new SystemZPostRewrite();  } +// MI is a load-register-on-condition pseudo instruction.  Replace it with +// LowOpcode if source and destination are both low GR32s and HighOpcode if +// source and destination are both high GR32s. Otherwise, a branch sequence +// is created. +void SystemZPostRewrite::selectLOCRMux(MachineBasicBlock &MBB, +                                       MachineBasicBlock::iterator MBBI, +                                       MachineBasicBlock::iterator &NextMBBI, +                                       unsigned LowOpcode, +                                       unsigned HighOpcode) { +  Register DestReg = MBBI->getOperand(0).getReg(); +  Register SrcReg = MBBI->getOperand(2).getReg(); +  bool DestIsHigh = SystemZ::isHighReg(DestReg); +  bool SrcIsHigh = SystemZ::isHighReg(SrcReg); + +  if (!DestIsHigh && !SrcIsHigh) +    MBBI->setDesc(TII->get(LowOpcode)); +  else if (DestIsHigh && SrcIsHigh) +    MBBI->setDesc(TII->get(HighOpcode)); +  else +    expandCondMove(MBB, MBBI, NextMBBI); +} + +// MI is a select pseudo instruction.  Replace it with LowOpcode if source +// and destination are all low GR32s and HighOpcode if source and destination +// are all high GR32s. Otherwise, a branch sequence is created. +void SystemZPostRewrite::selectSELRMux(MachineBasicBlock &MBB, +                                       MachineBasicBlock::iterator MBBI, +                                       MachineBasicBlock::iterator &NextMBBI, +                                       unsigned LowOpcode, +                                       unsigned HighOpcode) { +  Register DestReg = MBBI->getOperand(0).getReg(); +  Register Src1Reg = MBBI->getOperand(1).getReg(); +  Register Src2Reg = MBBI->getOperand(2).getReg(); +  bool DestIsHigh = SystemZ::isHighReg(DestReg); +  bool Src1IsHigh = SystemZ::isHighReg(Src1Reg); +  bool Src2IsHigh = SystemZ::isHighReg(Src2Reg); + +  // If sources and destination aren't all high or all low, we may be able to +  // simplify the operation by moving one of the sources to the destination +  // first.  But only if this doesn't clobber the other source. +  if (DestReg != Src1Reg && DestReg != Src2Reg) { +    if (DestIsHigh != Src1IsHigh) { +      BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(), +              TII->get(SystemZ::COPY), DestReg) +        .addReg(MBBI->getOperand(1).getReg(), getRegState(MBBI->getOperand(1))); +      MBBI->getOperand(1).setReg(DestReg); +      Src1Reg = DestReg; +      Src1IsHigh = DestIsHigh; +    } else if (DestIsHigh != Src2IsHigh) { +      BuildMI(*MBBI->getParent(), MBBI, MBBI->getDebugLoc(), +              TII->get(SystemZ::COPY), DestReg) +        .addReg(MBBI->getOperand(2).getReg(), getRegState(MBBI->getOperand(2))); +      MBBI->getOperand(2).setReg(DestReg); +      Src2Reg = DestReg; +      Src2IsHigh = DestIsHigh; +    } +  } + +  // If the destination (now) matches one source, prefer this to be first. 
+  if (DestReg != Src1Reg && DestReg == Src2Reg) { +    TII->commuteInstruction(*MBBI, false, 1, 2); +    std::swap(Src1Reg, Src2Reg); +    std::swap(Src1IsHigh, Src2IsHigh); +  } + +  if (!DestIsHigh && !Src1IsHigh && !Src2IsHigh) +    MBBI->setDesc(TII->get(LowOpcode)); +  else if (DestIsHigh && Src1IsHigh && Src2IsHigh) +    MBBI->setDesc(TII->get(HighOpcode)); +  else +    // Given the simplification above, we must already have a two-operand case. +    expandCondMove(MBB, MBBI, NextMBBI); +} + +// Replace MBBI by a branch sequence that performs a conditional move of +// operand 2 to the destination register. Operand 1 is expected to be the +// same register as the destination. +bool SystemZPostRewrite::expandCondMove(MachineBasicBlock &MBB, +                                        MachineBasicBlock::iterator MBBI, +                                        MachineBasicBlock::iterator &NextMBBI) { +  MachineFunction &MF = *MBB.getParent(); +  const BasicBlock *BB = MBB.getBasicBlock(); +  MachineInstr &MI = *MBBI; +  DebugLoc DL = MI.getDebugLoc(); +  Register DestReg = MI.getOperand(0).getReg(); +  Register SrcReg = MI.getOperand(2).getReg(); +  unsigned CCValid = MI.getOperand(3).getImm(); +  unsigned CCMask = MI.getOperand(4).getImm(); +  assert(DestReg == MI.getOperand(1).getReg() && +         "Expected destination and first source operand to be the same."); + +  LivePhysRegs LiveRegs(TII->getRegisterInfo()); +  LiveRegs.addLiveOuts(MBB); +  for (auto I = std::prev(MBB.end()); I != MBBI; --I) +    LiveRegs.stepBackward(*I); + +  // Splice MBB at MI, moving the rest of the block into RestMBB. +  MachineBasicBlock *RestMBB = MF.CreateMachineBasicBlock(BB); +  MF.insert(std::next(MachineFunction::iterator(MBB)), RestMBB); +  RestMBB->splice(RestMBB->begin(), &MBB, MI, MBB.end()); +  RestMBB->transferSuccessors(&MBB); +  for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I) +    RestMBB->addLiveIn(*I); + +  // Create a new block MoveMBB to hold the move instruction. +  MachineBasicBlock *MoveMBB = MF.CreateMachineBasicBlock(BB); +  MF.insert(std::next(MachineFunction::iterator(MBB)), MoveMBB); +  MoveMBB->addLiveIn(SrcReg); +  for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I) +    MoveMBB->addLiveIn(*I); + +  // At the end of MBB, create a conditional branch to RestMBB if the +  // condition is false, otherwise fall through to MoveMBB. +  BuildMI(&MBB, DL, TII->get(SystemZ::BRC)) +    .addImm(CCValid).addImm(CCMask ^ CCValid).addMBB(RestMBB); +  MBB.addSuccessor(RestMBB); +  MBB.addSuccessor(MoveMBB); + +  // In MoveMBB, emit an instruction to move SrcReg into DestReg, +  // then fall through to RestMBB. +  BuildMI(*MoveMBB, MoveMBB->end(), DL, TII->get(SystemZ::COPY), DestReg) +      .addReg(MI.getOperand(2).getReg(), getRegState(MI.getOperand(2))); +  MoveMBB->addSuccessor(RestMBB); + +  NextMBBI = MBB.end(); +  MI.eraseFromParent(); +  LOCRMuxJumps++; +  return true; +} +  /// If MBBI references a pseudo instruction that should be selected here,  /// do it and return true.  Otherwise return false.  
bool SystemZPostRewrite::selectMI(MachineBasicBlock &MBB, -                                MachineBasicBlock::iterator MBBI, -                                MachineBasicBlock::iterator &NextMBBI) { +                                  MachineBasicBlock::iterator MBBI, +                                  MachineBasicBlock::iterator &NextMBBI) {    MachineInstr &MI = *MBBI;    unsigned Opcode = MI.getOpcode(); @@ -83,7 +222,7 @@ bool SystemZPostRewrite::selectMI(MachineBasicBlock &MBB,    if (TargetMemOpcode != -1) {      MI.setDesc(TII->get(TargetMemOpcode));      MI.tieOperands(0, 1); -    unsigned DstReg = MI.getOperand(0).getReg(); +    Register DstReg = MI.getOperand(0).getReg();      MachineOperand &SrcMO = MI.getOperand(1);      if (DstReg != SrcMO.getReg()) {        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(SystemZ::COPY), DstReg) @@ -94,6 +233,15 @@ bool SystemZPostRewrite::selectMI(MachineBasicBlock &MBB,      return true;    } +  switch (Opcode) { +  case SystemZ::LOCRMux: +    selectLOCRMux(MBB, MBBI, NextMBBI, SystemZ::LOCR, SystemZ::LOCFHR); +    return true; +  case SystemZ::SELRMux: +    selectSELRMux(MBB, MBBI, NextMBBI, SystemZ::SELR, SystemZ::SELFHR); +    return true; +  } +    return false;  } diff --git a/lib/Target/SystemZ/SystemZProcessors.td b/lib/Target/SystemZ/SystemZProcessors.td index b27c25beb58c..af33a0300552 100644 --- a/lib/Target/SystemZ/SystemZProcessors.td +++ b/lib/Target/SystemZ/SystemZProcessors.td @@ -35,5 +35,6 @@ def : ProcessorModel<"z13", Z13Model, Arch11SupportedFeatures.List>;  def : ProcessorModel<"arch12", Z14Model, Arch12SupportedFeatures.List>;  def : ProcessorModel<"z14", Z14Model, Arch12SupportedFeatures.List>; -def : ProcessorModel<"arch13", Arch13Model, Arch13SupportedFeatures.List>; +def : ProcessorModel<"arch13", Z15Model, Arch13SupportedFeatures.List>; +def : ProcessorModel<"z15", Z15Model, Arch13SupportedFeatures.List>; diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/lib/Target/SystemZ/SystemZRegisterInfo.cpp index e7cd6871dbb4..39ace5594b7f 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ b/lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -41,7 +41,7 @@ static const TargetRegisterClass *getRC32(MachineOperand &MO,      return &SystemZ::GRH32BitRegClass;    if (VRM && VRM->hasPhys(MO.getReg())) { -    unsigned PhysReg = VRM->getPhys(MO.getReg()); +    Register PhysReg = VRM->getPhys(MO.getReg());      if (SystemZ::GR32BitRegClass.contains(PhysReg))        return &SystemZ::GR32BitRegClass;      assert (SystemZ::GRH32BitRegClass.contains(PhysReg) && @@ -120,8 +120,8 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,            }            // Add the other operand of the LOCRMux to the worklist. -          unsigned OtherReg = -            (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg()); +          Register OtherReg = +              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());            if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)              Worklist.push_back(OtherReg);          } // end LOCRMux @@ -169,7 +169,8 @@ SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,        auto tryAddHint = [&](const MachineOperand *MO) -> void {          Register Reg = MO->getReg(); -        Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg); +        Register PhysReg = +            Register::isPhysicalRegister(Reg) ? 
Reg : VRM->getPhys(Reg);          if (PhysReg) {            if (MO->getSubReg())              PhysReg = getSubReg(PhysReg, MO->getSubReg()); @@ -297,8 +298,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,        assert(Mask && "One offset must be OK");      } while (!OpcodeForOffset); -    unsigned ScratchReg = -      MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass); +    Register ScratchReg = +        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);      int64_t HighOffset = OldOffset - Offset;      if (MI->getDesc().TSFlags & SystemZII::HasIndex @@ -351,8 +352,8 @@ bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,    // regalloc may run out of registers.    unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0); -  unsigned GR128Reg = MI->getOperand(WideOpNo).getReg(); -  unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg(); +  Register GR128Reg = MI->getOperand(WideOpNo).getReg(); +  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();    LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);    LiveInterval &IntGRNar = LIS.getInterval(GRNarReg); @@ -385,7 +386,7 @@ bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,    MEE++;    for (; MII != MEE; ++MII) {      for (const MachineOperand &MO : MII->operands()) -      if (MO.isReg() && isPhysicalRegister(MO.getReg())) { +      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {          for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);               SI.isValid(); ++SI)            if (NewRC->contains(*SI)) { diff --git a/lib/Target/SystemZ/SystemZRegisterInfo.h b/lib/Target/SystemZ/SystemZRegisterInfo.h index 4f721ec23e53..7044efef1ac6 100644 --- a/lib/Target/SystemZ/SystemZRegisterInfo.h +++ b/lib/Target/SystemZ/SystemZRegisterInfo.h @@ -28,6 +28,15 @@ inline unsigned even128(bool Is32bit) {  inline unsigned odd128(bool Is32bit) {    return Is32bit ? subreg_l32 : subreg_l64;  } + +// Reg should be a 32-bit GPR.  Return true if it is a high register rather +// than a low register. +inline bool isHighReg(unsigned int Reg) { +  if (SystemZ::GRH32BitRegClass.contains(Reg)) +    return true; +  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32"); +  return false; +}  } // end namespace SystemZ  struct SystemZRegisterInfo : public SystemZGenRegisterInfo { diff --git a/lib/Target/SystemZ/SystemZSchedule.td b/lib/Target/SystemZ/SystemZSchedule.td index 98eca2802242..119e3ee7c22c 100644 --- a/lib/Target/SystemZ/SystemZSchedule.td +++ b/lib/Target/SystemZ/SystemZSchedule.td @@ -59,7 +59,7 @@ def VBU : SchedWrite; // Virtual branching unit  def MCD : SchedWrite; // Millicode -include "SystemZScheduleArch13.td" +include "SystemZScheduleZ15.td"  include "SystemZScheduleZ14.td"  include "SystemZScheduleZ13.td"  include "SystemZScheduleZEC12.td" diff --git a/lib/Target/SystemZ/SystemZScheduleArch13.td b/lib/Target/SystemZ/SystemZScheduleZ15.td index 9f82f24d0e8f..56ceb88f35d4 100644 --- a/lib/Target/SystemZ/SystemZScheduleArch13.td +++ b/lib/Target/SystemZ/SystemZScheduleZ15.td @@ -1,4 +1,4 @@ -//-- SystemZScheduleArch13.td - SystemZ Scheduling Definitions ----*- tblgen -*-=// +//-- SystemZScheduleZ15.td - SystemZ Scheduling Definitions ----*- tblgen -*-=//  //  // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.  // See https://llvm.org/LICENSE.txt for license information. 
@@ -6,14 +6,14 @@  //  //===----------------------------------------------------------------------===//  // -// This file defines the machine model for Arch13 to support instruction +// This file defines the machine model for Z15 to support instruction  // scheduling and other instruction cost heuristics.  //  // Pseudos expanded right after isel do not need to be modelled here.  //  //===----------------------------------------------------------------------===// -def Arch13Model : SchedMachineModel { +def Z15Model : SchedMachineModel {      let UnsupportedFeatures = Arch13UnsupportedFeatures.List; @@ -27,7 +27,7 @@ def Arch13Model : SchedMachineModel {      let MispredictPenalty = 20;  } -let SchedModel = Arch13Model in  { +let SchedModel = Z15Model in  {  // These definitions need the SchedModel value. They could be put in a  // subtarget common include file, but it seems the include system in Tablegen  // currently (2016) rejects multiple includes of same file. @@ -73,43 +73,43 @@ let NumMicroOps = 0 in {  }  // Execution units. -def Arch13_FXaUnit     : ProcResource<2>; -def Arch13_FXbUnit     : ProcResource<2>; -def Arch13_LSUnit      : ProcResource<2>; -def Arch13_VecUnit     : ProcResource<2>; -def Arch13_VecFPdUnit  : ProcResource<2> { let BufferSize = 1; /* blocking */ } -def Arch13_VBUnit      : ProcResource<2>; -def Arch13_MCD         : ProcResource<1>; +def Z15_FXaUnit     : ProcResource<2>; +def Z15_FXbUnit     : ProcResource<2>; +def Z15_LSUnit      : ProcResource<2>; +def Z15_VecUnit     : ProcResource<2>; +def Z15_VecFPdUnit  : ProcResource<2> { let BufferSize = 1; /* blocking */ } +def Z15_VBUnit      : ProcResource<2>; +def Z15_MCD         : ProcResource<1>;  // Subtarget specific definitions of scheduling resources.  let NumMicroOps = 0 in { -  def : WriteRes<FXa, [Arch13_FXaUnit]>; -  def : WriteRes<FXb, [Arch13_FXbUnit]>; -  def : WriteRes<LSU, [Arch13_LSUnit]>; -  def : WriteRes<VecBF,  [Arch13_VecUnit]>; -  def : WriteRes<VecDF,  [Arch13_VecUnit]>; -  def : WriteRes<VecDFX, [Arch13_VecUnit]>; -  def : WriteRes<VecMul,  [Arch13_VecUnit]>; -  def : WriteRes<VecStr,  [Arch13_VecUnit]>; -  def : WriteRes<VecXsPm, [Arch13_VecUnit]>; +  def : WriteRes<FXa, [Z15_FXaUnit]>; +  def : WriteRes<FXb, [Z15_FXbUnit]>; +  def : WriteRes<LSU, [Z15_LSUnit]>; +  def : WriteRes<VecBF,  [Z15_VecUnit]>; +  def : WriteRes<VecDF,  [Z15_VecUnit]>; +  def : WriteRes<VecDFX, [Z15_VecUnit]>; +  def : WriteRes<VecMul,  [Z15_VecUnit]>; +  def : WriteRes<VecStr,  [Z15_VecUnit]>; +  def : WriteRes<VecXsPm, [Z15_VecUnit]>;    foreach Num = 2-5 in { let ResourceCycles = [Num] in { -    def : WriteRes<!cast<SchedWrite>("FXa"#Num), [Arch13_FXaUnit]>; -    def : WriteRes<!cast<SchedWrite>("FXb"#Num), [Arch13_FXbUnit]>; -    def : WriteRes<!cast<SchedWrite>("LSU"#Num), [Arch13_LSUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecBF"#Num), [Arch13_VecUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecDF"#Num), [Arch13_VecUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecDFX"#Num), [Arch13_VecUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecMul"#Num), [Arch13_VecUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecStr"#Num), [Arch13_VecUnit]>; -    def : WriteRes<!cast<SchedWrite>("VecXsPm"#Num), [Arch13_VecUnit]>; +    def : WriteRes<!cast<SchedWrite>("FXa"#Num), [Z15_FXaUnit]>; +    def : WriteRes<!cast<SchedWrite>("FXb"#Num), [Z15_FXbUnit]>; +    def : WriteRes<!cast<SchedWrite>("LSU"#Num), [Z15_LSUnit]>; +    def : WriteRes<!cast<SchedWrite>("VecBF"#Num), [Z15_VecUnit]>; +    def : 
WriteRes<!cast<SchedWrite>("VecDF"#Num), [Z15_VecUnit]>; +    def : WriteRes<!cast<SchedWrite>("VecDFX"#Num), [Z15_VecUnit]>; +    def : WriteRes<!cast<SchedWrite>("VecMul"#Num), [Z15_VecUnit]>; +    def : WriteRes<!cast<SchedWrite>("VecStr"#Num), [Z15_VecUnit]>; +    def : WriteRes<!cast<SchedWrite>("VecXsPm"#Num), [Z15_VecUnit]>;    }} -  def : WriteRes<VecFPd,  [Arch13_VecFPdUnit]> { let ResourceCycles = [30]; } +  def : WriteRes<VecFPd,  [Z15_VecFPdUnit]> { let ResourceCycles = [30]; } -  def : WriteRes<VBU,     [Arch13_VBUnit]>; // Virtual Branching Unit +  def : WriteRes<VBU,     [Z15_VBUnit]>; // Virtual Branching Unit  } -def : WriteRes<MCD, [Arch13_MCD]> { let NumMicroOps = 3; +def : WriteRes<MCD, [Z15_MCD]> { let NumMicroOps = 3;                                      let BeginGroup  = 1;                                      let EndGroup    = 1; } diff --git a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp index a50e6aa59711..47c925dcf730 100644 --- a/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp +++ b/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp @@ -209,10 +209,10 @@ std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::EmitTargetCodeForMemchr(    // Now select between End and null, depending on whether the character    // was found. -  SDValue Ops[] = {End, DAG.getConstant(0, DL, PtrVT), -                   DAG.getConstant(SystemZ::CCMASK_SRST, DL, MVT::i32), -                   DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, DL, MVT::i32), -                   CCReg}; +  SDValue Ops[] = { +      End, DAG.getConstant(0, DL, PtrVT), +      DAG.getTargetConstant(SystemZ::CCMASK_SRST, DL, MVT::i32), +      DAG.getTargetConstant(SystemZ::CCMASK_SRST_FOUND, DL, MVT::i32), CCReg};    End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, PtrVT, Ops);    return std::make_pair(End, Chain);  } diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp index e79dfc5b4b9e..2aca22c9082a 100644 --- a/lib/Target/SystemZ/SystemZShortenInst.cpp +++ b/lib/Target/SystemZ/SystemZShortenInst.cpp @@ -75,7 +75,7 @@ static void tieOpsIfNeeded(MachineInstr &MI) {  // instead of IIxF.  bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned LLIxL,                                      unsigned LLIxH) { -  unsigned Reg = MI.getOperand(0).getReg(); +  Register Reg = MI.getOperand(0).getReg();    // The new opcode will clear the other half of the GR64 reg, so    // cancel if that is live.    
unsigned thisSubRegIdx = @@ -86,7 +86,7 @@ bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned LLIxL,                                              : SystemZ::subreg_l32);    unsigned GR64BitReg =        TRI->getMatchingSuperReg(Reg, thisSubRegIdx, &SystemZ::GR64BitRegClass); -  unsigned OtherReg = TRI->getSubReg(GR64BitReg, otherSubRegIdx); +  Register OtherReg = TRI->getSubReg(GR64BitReg, otherSubRegIdx);    if (LiveRegs.contains(OtherReg))      return false; diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp index 5c49e6eff0bf..20865037fe38 100644 --- a/lib/Target/SystemZ/SystemZTargetMachine.cpp +++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp @@ -154,7 +154,7 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT,            getEffectiveRelocModel(RM),            getEffectiveSystemZCodeModel(CM, getEffectiveRelocModel(RM), JIT),            OL), -      TLOF(llvm::make_unique<TargetLoweringObjectFileELF>()), +      TLOF(std::make_unique<TargetLoweringObjectFileELF>()),        Subtarget(TT, CPU, FS, *this) {    initAsmInfo();  } @@ -176,7 +176,7 @@ public:    ScheduleDAGInstrs *    createPostMachineScheduler(MachineSchedContext *C) const override {      return new ScheduleDAGMI(C, -                             llvm::make_unique<SystemZPostRASchedStrategy>(C), +                             std::make_unique<SystemZPostRASchedStrategy>(C),                               /*RemoveKillFlags=*/true);    } @@ -184,6 +184,7 @@ public:    bool addInstSelector() override;    bool addILPOpts() override;    void addPostRewrite() override; +  void addPostRegAlloc() override;    void addPreSched2() override;    void addPreEmitPass() override;  }; @@ -217,14 +218,14 @@ void SystemZPassConfig::addPostRewrite() {    addPass(createSystemZPostRewritePass(getSystemZTargetMachine()));  } -void SystemZPassConfig::addPreSched2() { +void SystemZPassConfig::addPostRegAlloc() {    // PostRewrite needs to be run at -O0 also (in which case addPostRewrite()    // is not called).    if (getOptLevel() == CodeGenOpt::None)      addPass(createSystemZPostRewritePass(getSystemZTargetMachine())); +} -  addPass(createSystemZExpandPseudoPass(getSystemZTargetMachine())); - +void SystemZPassConfig::addPreSched2() {    if (getOptLevel() != CodeGenOpt::None)      addPass(&IfConverterID);  } diff --git a/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp index 145cf87ef9f5..11c99aa11174 100644 --- a/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -304,7 +304,8 @@ bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,               C2.ScaleCost, C2.SetupCost);  } -unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) { +unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const { +  bool Vector = (ClassID == 1);    if (!Vector)      // Discount the stack pointer.  Also leave out %r0, since it can't      // be used in an address. @@ -707,7 +708,7 @@ int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,        // TODO: Fix base implementation which could simplify things a bit here        // (seems to miss on differentiating on scalar/vector types). -      // Only 64 bit vector conversions are natively supported before arch13. +      // Only 64 bit vector conversions are natively supported before z15.        
if (DstScalarBits == 64 || ST->hasVectorEnhancements2()) {          if (SrcScalarBits == DstScalarBits)            return NumDstVectors; diff --git a/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/lib/Target/SystemZ/SystemZTargetTransformInfo.h index 16ce2ef1d7a0..3ba80b31439f 100644 --- a/lib/Target/SystemZ/SystemZTargetTransformInfo.h +++ b/lib/Target/SystemZ/SystemZTargetTransformInfo.h @@ -56,12 +56,12 @@ public:    /// \name Vector TTI Implementations    /// @{ -  unsigned getNumberOfRegisters(bool Vector); +  unsigned getNumberOfRegisters(unsigned ClassID) const;    unsigned getRegisterBitWidth(bool Vector) const; -  unsigned getCacheLineSize() { return 256; } -  unsigned getPrefetchDistance() { return 2000; } -  unsigned getMinPrefetchStride() { return 2048; } +  unsigned getCacheLineSize() const override { return 256; } +  unsigned getPrefetchDistance() const override { return 2000; } +  unsigned getMinPrefetchStride() const override { return 2048; }    bool hasDivRemOp(Type *DataType, bool IsSigned);    bool prefersVectorizedAddressing() { return false; } | 
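The branch-relaxation hunks at the top of this diff replace the hand-rolled mask arithmetic in SystemZLongBranch with alignTo() and pass Log2(MF->getAlignment()) where a log2 value is still expected, consistent with Block.Alignment now holding a byte alignment rather than its log2. A minimal stand-alone model of that rounding (alignUp is an illustrative name, not the llvm::alignTo entry point):

  #include <cassert>
  #include <cstdint>

  // Round Addr up to the next multiple of a power-of-two byte alignment;
  // this is what the removed "(Address + AlignMask) & ~AlignMask" idiom
  // computed by hand from a log2 value.
  uint64_t alignUp(uint64_t Addr, uint64_t Alignment) {
    assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0 &&
           "alignment must be a power of two");
    uint64_t Mask = Alignment - 1;
    return (Addr + Mask) & ~Mask;
  }

  // Example: alignUp(0x1003, 16) == 0x1010.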
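Several TableGen hunks (SystemZOperands.td, SystemZOperators.td, SystemZPatterns.td) switch immediate patterns from imm to timm, and SystemZSelectionDAGInfo.cpp now builds the SELECT_CCMASK operands with getTargetConstant instead of getConstant. The common idea is that a target constant is an operand the selector must keep as a literal field of the instruction (such as a condition-code mask) rather than something legalization may turn into a register. A conceptual sketch only, not LLVM's node representation (ImmOperand and makeCCMaskOperand are invented names):

  #include <cstdint>

  // A plain constant may be materialized into a register; a target constant
  // is expected to stay an immediate operand of the final machine instruction.
  struct ImmOperand {
    uint64_t Value;
    bool IsTargetConstant;  // true: must be encoded directly, like a CC mask
  };

  ImmOperand makeCCMaskOperand(uint64_t Mask) {
    return {Mask, /*IsTargetConstant=*/true};  // analogous to getTargetConstant
  }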
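SystemZPostRewrite.cpp gains selectLOCRMux/selectSELRMux and expandCondMove: when the operands of a LOCRMux or SELRMux pseudo do not all live in the same (low or high) GR32 half, the pseudo is expanded into a BRC that branches past a plain COPY, using the mask CCMask ^ CCValid so the branch is taken when the condition is false. A hedged sketch of the resulting control flow (condMove is an illustrative stand-in; the real code operates on machine basic blocks and a 4-bit condition-code mask):

  #include <cstdint>

  // Models "Dest = Cond ? Src : Dest" the way the expansion lays it out.
  uint32_t condMove(uint32_t Dest, uint32_t Src, bool Cond) {
    if (!Cond)       // BRC CCValid, CCMask ^ CCValid  -> branch to RestMBB
      return Dest;   // move skipped
    Dest = Src;      // MoveMBB: COPY SrcReg into DestReg
    return Dest;     // fall through / rejoin in RestMBB
  }

As the code comments note, selectSELRMux first tries to move one source into the destination register so the select degenerates to the two-operand case, which lets it pick SELR/SELFHR (or LOCR/LOCFHR) instead of the branch sequence.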
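The register-allocation and instruction-shortening hunks (SystemZRegisterInfo.cpp, SystemZShortenInst.cpp, SystemZPostRewrite.cpp) continue the migration from raw unsigned register numbers to llvm::Register and the static Register::isPhysicalRegister() query. A minimal sketch of why such a wrapper is useful; RegId and its VirtualBit encoding are assumptions of the sketch, not LLVM's actual class layout:

  // A thin value type around a register id: still cheap to pass and compare,
  // but it gives queries like "is this physical?" a natural home instead of
  // free functions taking bare unsigned values.
  class RegId {
    unsigned Id;
    static constexpr unsigned VirtualBit = 1u << 31;  // assumed encoding
  public:
    constexpr RegId(unsigned Id = 0) : Id(Id) {}
    constexpr operator unsigned() const { return Id; }  // interop with old code
    static constexpr bool isPhysicalRegister(unsigned R) {
      return R != 0 && (R & VirtualBit) == 0;
    }
    constexpr bool isPhysical() const { return isPhysicalRegister(Id); }
  };

  static_assert(RegId::isPhysicalRegister(15), "low ids are physical here");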
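A recurring mechanical change in this import is dropping the llvm::make_unique polyfill for C++14 std::make_unique, as in the TLOF member and the post-RA scheduling strategy of SystemZTargetMachine.cpp. For reference, the idiom it standardizes on (SchedStrategy and makeStrategy are placeholder names):

  #include <memory>

  struct SchedStrategy {
    explicit SchedStrategy(int Ctx) { (void)Ctx; }
  };

  // One allocation, no explicit new, exception-safe construction.
  std::unique_ptr<SchedStrategy> makeStrategy(int Ctx) {
    return std::make_unique<SchedStrategy>(Ctx);  // was llvm::make_unique<...>
  }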
