path: root/lib/Target/Mips
author     Dimitry Andric <dim@FreeBSD.org>    2013-12-22 00:04:03 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2013-12-22 00:04:03 +0000
commit     f8af5cf600354830d4ccf59732403f0f073eccb9 (patch)
tree       2ba0398b4c42ad4f55561327538044fd2c925a8b /lib/Target/Mips
parent     59d6cff90eecf31cb3dd860c4e786674cfdd42eb (diff)
download   src-f8af5cf600354830d4ccf59732403f0f073eccb9.tar.gz
           src-f8af5cf600354830d4ccf59732403f0f073eccb9.zip
Vendor import of llvm release_34 branch r197841 (effectively, 3.4 RC3)
(tag: vendor/llvm/llvm-release_34-r197841)

Notes:
    svn path=/vendor/llvm/dist/; revision=259698
    svn path=/vendor/llvm/llvm-release_34-r197841/; revision=259700; tag=vendor/llvm/llvm-release_34-r197841
Diffstat (limited to 'lib/Target/Mips')
-rw-r--r--  lib/Target/Mips/AsmParser/MipsAsmParser.cpp          | 1572
-rw-r--r--  lib/Target/Mips/CMakeLists.txt                       |    4
-rw-r--r--  lib/Target/Mips/Disassembler/MipsDisassembler.cpp    |  446
-rw-r--r--  lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp      |   89
-rw-r--r--  lib/Target/Mips/InstPrinter/MipsInstPrinter.h        |    9
-rw-r--r--  lib/Target/Mips/MCTargetDesc/CMakeLists.txt          |    3
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp      |   43
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp  |   81
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h    |   28
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp |   39
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp     |   89
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h       |   43
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h        |   39
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp       |    3
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h         |    7
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp   |  210
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp    |   49
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h      |   16
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp  |   67
-rw-r--r--  lib/Target/Mips/MSA.txt                              |   78
-rw-r--r--  lib/Target/Mips/MicroMipsInstrFormats.td             |  196
-rw-r--r--  lib/Target/Mips/MicroMipsInstrInfo.td                |  228
-rw-r--r--  lib/Target/Mips/Mips.h                               |    1
-rw-r--r--  lib/Target/Mips/Mips.td                              |    3
-rw-r--r--  lib/Target/Mips/Mips16FrameLowering.cpp              |   26
-rw-r--r--  lib/Target/Mips/Mips16FrameLowering.h                |    2
-rw-r--r--  lib/Target/Mips/Mips16HardFloat.cpp                  |  517
-rw-r--r--  lib/Target/Mips/Mips16HardFloat.h                    |   54
-rw-r--r--  lib/Target/Mips/Mips16ISelDAGToDAG.cpp               |   21
-rw-r--r--  lib/Target/Mips/Mips16ISelDAGToDAG.h                 |    2
-rw-r--r--  lib/Target/Mips/Mips16ISelLowering.cpp               |  239
-rw-r--r--  lib/Target/Mips/Mips16ISelLowering.h                 |    4
-rw-r--r--  lib/Target/Mips/Mips16InstrFormats.td                |   18
-rw-r--r--  lib/Target/Mips/Mips16InstrInfo.cpp                  |  203
-rw-r--r--  lib/Target/Mips/Mips16InstrInfo.h                    |   15
-rw-r--r--  lib/Target/Mips/Mips16InstrInfo.td                   |  188
-rw-r--r--  lib/Target/Mips/Mips16RegisterInfo.cpp               |   17
-rw-r--r--  lib/Target/Mips/Mips16RegisterInfo.h                 |    4
-rw-r--r--  lib/Target/Mips/Mips64InstrInfo.td                   |  440
-rw-r--r--  lib/Target/Mips/MipsAnalyzeImmediate.cpp             |    2
-rw-r--r--  lib/Target/Mips/MipsAnalyzeImmediate.h               |   10
-rw-r--r--  lib/Target/Mips/MipsAsmPrinter.cpp                   |  141
-rw-r--r--  lib/Target/Mips/MipsAsmPrinter.h                     |   25
-rw-r--r--  lib/Target/Mips/MipsCallingConv.td                   |   30
-rw-r--r--  lib/Target/Mips/MipsCodeEmitter.cpp                  |   40
-rw-r--r--  lib/Target/Mips/MipsCondMov.td                       |  210
-rw-r--r--  lib/Target/Mips/MipsConstantIslandPass.cpp           | 1470
-rw-r--r--  lib/Target/Mips/MipsDSPInstrInfo.td                  |  468
-rw-r--r--  lib/Target/Mips/MipsDelaySlotFiller.cpp              |   38
-rw-r--r--  lib/Target/Mips/MipsISelDAGToDAG.cpp                 |   81
-rw-r--r--  lib/Target/Mips/MipsISelDAGToDAG.h                   |   38
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp                 |  824
-rw-r--r--  lib/Target/Mips/MipsISelLowering.h                   |  184
-rw-r--r--  lib/Target/Mips/MipsInstrFPU.td                      |  456
-rw-r--r--  lib/Target/Mips/MipsInstrFormats.td                  |  143
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.cpp                    |   43
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.h                      |   16
-rw-r--r--  lib/Target/Mips/MipsInstrInfo.td                     |  983
-rw-r--r--  lib/Target/Mips/MipsJITInfo.cpp                      |    6
-rw-r--r--  lib/Target/Mips/MipsLongBranch.cpp                   |   25
-rw-r--r--  lib/Target/Mips/MipsMCInstLower.cpp                  |    5
-rw-r--r--  lib/Target/Mips/MipsMCInstLower.h                    |    4
-rw-r--r--  lib/Target/Mips/MipsMSAInstrFormats.td               |  406
-rw-r--r--  lib/Target/Mips/MipsMSAInstrInfo.td                  | 3694
-rw-r--r--  lib/Target/Mips/MipsMachineFunction.cpp              |   72
-rw-r--r--  lib/Target/Mips/MipsMachineFunction.h                |  103
-rw-r--r--  lib/Target/Mips/MipsOs16.cpp                         |   45
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.cpp                 |   79
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.h                   |    8
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.td                  |  411
-rw-r--r--  lib/Target/Mips/MipsSEFrameLowering.cpp              |  184
-rw-r--r--  lib/Target/Mips/MipsSEFrameLowering.h                |    2
-rw-r--r--  lib/Target/Mips/MipsSEISelDAGToDAG.cpp               |  443
-rw-r--r--  lib/Target/Mips/MipsSEISelDAGToDAG.h                 |   48
-rw-r--r--  lib/Target/Mips/MipsSEISelLowering.cpp               | 2329
-rw-r--r--  lib/Target/Mips/MipsSEISelLowering.h                 |   48
-rw-r--r--  lib/Target/Mips/MipsSEInstrInfo.cpp                  |  310
-rw-r--r--  lib/Target/Mips/MipsSEInstrInfo.h                    |   40
-rw-r--r--  lib/Target/Mips/MipsSERegisterInfo.cpp               |   85
-rw-r--r--  lib/Target/Mips/MipsSERegisterInfo.h                 |    5
-rw-r--r--  lib/Target/Mips/MipsSchedule.td                      |   15
-rw-r--r--  lib/Target/Mips/MipsSubtarget.cpp                    |   34
-rw-r--r--  lib/Target/Mips/MipsSubtarget.h                      |   31
-rw-r--r--  lib/Target/Mips/MipsTargetMachine.cpp                |   21
-rw-r--r--  lib/Target/Mips/MipsTargetMachine.h                  |    6
-rw-r--r--  lib/Target/Mips/MipsTargetStreamer.h                 |   44
86 files changed, 15457 insertions, 3341 deletions
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 0795cb963b35..cdae6c2f37e5 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -9,6 +9,7 @@
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "MipsRegisterInfo.h"
+#include "MipsTargetStreamer.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
@@ -20,26 +21,29 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/APInt.h"
using namespace llvm;
+namespace llvm {
+class MCInstrInfo;
+}
+
namespace {
class MipsAssemblerOptions {
public:
- MipsAssemblerOptions():
- aTReg(1), reorder(true), macro(true) {
- }
+ MipsAssemblerOptions() : aTReg(1), reorder(true), macro(true) {}
- unsigned getATRegNum() {return aTReg;}
+ unsigned getATRegNum() { return aTReg; }
bool setATReg(unsigned Reg);
- bool isReorder() {return reorder;}
- void setReorder() {reorder = true;}
- void setNoreorder() {reorder = false;}
+ bool isReorder() { return reorder; }
+ void setReorder() { reorder = true; }
+ void setNoreorder() { reorder = false; }
- bool isMacro() {return macro;}
- void setMacro() {macro = true;}
- void setNomacro() {macro = false;}
+ bool isMacro() { return macro; }
+ void setMacro() { macro = true; }
+ void setNomacro() { macro = false; }
private:
unsigned aTReg;
@@ -51,23 +55,21 @@ private:
namespace {
class MipsAsmParser : public MCTargetAsmParser {
- enum FpFormatTy {
- FP_FORMAT_NONE = -1,
- FP_FORMAT_S,
- FP_FORMAT_D,
- FP_FORMAT_L,
- FP_FORMAT_W
- } FpFormat;
+ MipsTargetStreamer &getTargetStreamer() {
+ MCTargetStreamer &TS = Parser.getStreamer().getTargetStreamer();
+ return static_cast<MipsTargetStreamer &>(TS);
+ }
MCSubtargetInfo &STI;
MCAsmParser &Parser;
MipsAssemblerOptions Options;
+ bool hasConsumedDollar;
#define GET_ASSEMBLER_HEADER
#include "MipsGenAsmMatcher.inc"
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands,
MCStreamer &Out, unsigned &ErrorInfo,
bool MatchingInlineAsm);
@@ -75,40 +77,98 @@ class MipsAsmParser : public MCTargetAsmParser {
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- bool parseMathOperation(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands);
bool ParseDirective(AsmToken DirectiveID);
MipsAsmParser::OperandMatchResultTy
- parseMemOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands, int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMemOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ bool parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind);
+
+ MipsAsmParser::OperandMatchResultTy
+ parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseAFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseFGRH32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseHW64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
MipsAsmParser::OperandMatchResultTy
- parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128BRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128HRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128WRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128DRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseMSA128CtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
+
+ MipsAsmParser::OperandMatchResultTy
+ parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands);
bool searchSymbolAlias(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- unsigned RegisterClass);
+ unsigned RegKind);
- bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &,
+ bool ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &,
StringRef Mnemonic);
int tryParseRegister(bool is64BitReg);
- bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ bool tryParseRegisterOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
bool is64BitReg);
bool needsExpansion(MCInst &Inst);
@@ -122,17 +182,19 @@ class MipsAsmParser : public MCTargetAsmParser {
void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
void expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions,
- bool isLoad,bool isImmOpnd);
+ SmallVectorImpl<MCInst> &Instructions, bool isLoad,
+ bool isImmOpnd);
bool reportParseError(StringRef ErrorMsg);
bool parseMemOffset(const MCExpr *&Res, bool isParenExpr);
bool parseRelocOperand(const MCExpr *&Res);
- const MCExpr* evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr);
+ const MCExpr *evaluateRelocExpr(const MCExpr *Expr, StringRef RelocStr);
bool isEvaluated(const MCExpr *Expr);
bool parseDirectiveSet();
+ bool parseDirectiveMipsHackStocg();
+ bool parseDirectiveMipsHackELFFlags();
bool parseSetAtDirective();
bool parseSetNoAtDirective();
@@ -144,6 +206,7 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseSetAssignment();
bool parseDirectiveWord(unsigned Size, SMLoc L);
+ bool parseDirectiveGpWord();
MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol);
@@ -155,40 +218,49 @@ class MipsAsmParser : public MCTargetAsmParser {
return (STI.getFeatureBits() & Mips::FeatureFP64Bit) != 0;
}
+ bool isN64() const { return STI.getFeatureBits() & Mips::FeatureN64; }
+
int matchRegisterName(StringRef Symbol, bool is64BitReg);
int matchCPURegisterName(StringRef Symbol);
int matchRegisterByNumber(unsigned RegNum, unsigned RegClass);
- void setFpFormat(FpFormatTy Format) {
- FpFormat = Format;
- }
+ int matchFPURegisterName(StringRef Name);
- void setDefaultFpFormat();
+ int matchFCCRegisterName(StringRef Name);
- void setFpFormat(StringRef Format);
+ int matchACRegisterName(StringRef Name);
- FpFormatTy getFpFormat() {return FpFormat;}
+ int matchMSA128RegisterName(StringRef Name);
- bool requestsDoubleOperand(StringRef Mnemonic);
+ int matchMSA128CtrlRegisterName(StringRef Name);
+
+ int regKindToRegClass(int RegKind);
unsigned getReg(int RC, int RegNo);
int getATReg();
bool processInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
+ SmallVectorImpl<MCInst> &Instructions);
+
+ // Helper function that checks if the value of a vector index is within the
+ // boundaries of accepted values for each RegisterKind
+ // Example: INSERT.B $w0[n], $1 => 16 > n >= 0
+ bool validateMSAIndex(int Val, int RegKind);
+
public:
- MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser)
- : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(sti), Parser(parser),
+ hasConsumedDollar(false) {
// Initialize the set of available features.
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
};
}
@@ -201,14 +273,24 @@ class MipsOperand : public MCParsedAsmOperand {
public:
enum RegisterKind {
Kind_None,
- Kind_CPURegs,
- Kind_CPU64Regs,
+ Kind_GPR32,
+ Kind_GPR64,
Kind_HWRegs,
- Kind_HW64Regs,
Kind_FGR32Regs,
+ Kind_FGRH32Regs,
Kind_FGR64Regs,
Kind_AFGR64Regs,
- Kind_CCRRegs
+ Kind_CCRRegs,
+ Kind_FCCRegs,
+ Kind_ACC64DSP,
+ Kind_LO32DSP,
+ Kind_HI32DSP,
+ Kind_COP2,
+ Kind_MSA128BRegs,
+ Kind_MSA128HRegs,
+ Kind_MSA128WRegs,
+ Kind_MSA128DRegs,
+ Kind_MSA128CtrlRegs
};
private:
@@ -219,7 +301,9 @@ private:
k_Memory,
k_PostIndexRegister,
k_Register,
- k_Token
+ k_PtrReg,
+ k_Token,
+ k_LSAImm
} Kind;
MipsOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
@@ -258,7 +342,12 @@ public:
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- void addExpr(MCInst &Inst, const MCExpr *Expr) const{
+ void addPtrRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(getPtrReg()));
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediate when possible. Null MCExpr = 0.
if (Expr == 0)
Inst.addOperand(MCOperand::CreateImm(0));
@@ -287,6 +376,9 @@ public:
bool isImm() const { return Kind == k_Immediate; }
bool isToken() const { return Kind == k_Token; }
bool isMem() const { return Kind == k_Memory; }
+ bool isPtrReg() const { return Kind == k_PtrReg; }
+ bool isInvNum() const { return Kind == k_Immediate; }
+ bool isLSAImm() const { return Kind == k_LSAImm; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
@@ -298,13 +390,18 @@ public:
return Reg.RegNum;
}
+ unsigned getPtrReg() const {
+ assert((Kind == k_PtrReg) && "Invalid access!");
+ return Reg.RegNum;
+ }
+
void setRegKind(RegisterKind RegKind) {
- assert((Kind == k_Register) && "Invalid access!");
+ assert((Kind == k_Register || Kind == k_PtrReg) && "Invalid access!");
Reg.Kind = RegKind;
}
const MCExpr *getImm() const {
- assert((Kind == k_Immediate) && "Invalid access!");
+ assert((Kind == k_Immediate || Kind == k_LSAImm) && "Invalid access!");
return Imm.Val;
}
@@ -335,6 +432,14 @@ public:
return Op;
}
+ static MipsOperand *CreatePtrReg(unsigned RegNum, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_PtrReg);
+ Op->Reg.RegNum = RegNum;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static MipsOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
MipsOperand *Op = new MipsOperand(k_Immediate);
Op->Imm.Val = Val;
@@ -343,8 +448,16 @@ public:
return Op;
}
+ static MipsOperand *CreateLSAImm(const MCExpr *Val, SMLoc S, SMLoc E) {
+ MipsOperand *Op = new MipsOperand(k_LSAImm);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static MipsOperand *CreateMem(unsigned Base, const MCExpr *Off,
- SMLoc S, SMLoc E) {
+ SMLoc S, SMLoc E) {
MipsOperand *Op = new MipsOperand(k_Memory);
Op->Mem.Base = Base;
Op->Mem.Off = Off;
@@ -353,59 +466,91 @@ public:
return Op;
}
- bool isCPURegsAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_CPURegs;
+ bool isGPR32Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_GPR32;
}
- void addCPURegsAsmOperands(MCInst &Inst, unsigned N) const {
+ void addRegAsmOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
}
- bool isCPU64RegsAsm() const {
- return Kind == k_Register && Reg.Kind == Kind_CPU64Regs;
- }
- void addCPU64RegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+ bool isGPR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_GPR64;
}
bool isHWRegsAsm() const {
assert((Kind == k_Register) && "Invalid access!");
return Reg.Kind == Kind_HWRegs;
}
- void addHWRegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
- }
- bool isHW64RegsAsm() const {
+ bool isCCRAsm() const {
assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_HW64Regs;
+ return Reg.Kind == Kind_CCRRegs;
}
- void addHW64RegsAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+
+ bool isAFGR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_AFGR64Regs;
}
- void addCCRAsmOperands(MCInst &Inst, unsigned N) const {
- Inst.addOperand(MCOperand::CreateReg(Reg.RegNum));
+ bool isFGR64Asm() const {
+ return Kind == k_Register && Reg.Kind == Kind_FGR64Regs;
}
- bool isCCRAsm() const {
- assert((Kind == k_Register) && "Invalid access!");
- return Reg.Kind == Kind_CCRRegs;
+ bool isFGR32Asm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FGR32Regs;
}
- /// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const {
- return StartLoc;
+ bool isFGRH32Asm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FGRH32Regs;
}
- /// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const {
- return EndLoc;
+
+ bool isFCCRegsAsm() const {
+ return (Kind == k_Register) && Reg.Kind == Kind_FCCRegs;
+ }
+
+ bool isACC64DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_ACC64DSP;
+ }
+
+ bool isLO32DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_LO32DSP;
+ }
+
+ bool isHI32DSPAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_HI32DSP;
+ }
+
+ bool isCOP2Asm() const { return Kind == k_Register && Reg.Kind == Kind_COP2; }
+
+ bool isMSA128BAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128BRegs;
+ }
+
+ bool isMSA128HAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128HRegs;
+ }
+
+ bool isMSA128WAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128WRegs;
+ }
+
+ bool isMSA128DAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128DRegs;
+ }
+
+ bool isMSA128CRAsm() const {
+ return Kind == k_Register && Reg.Kind == Kind_MSA128CtrlRegs;
}
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const { return StartLoc; }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const { return EndLoc; }
+
virtual void print(raw_ostream &OS) const {
llvm_unreachable("unimplemented!");
}
}; // class MipsOperand
-} // namespace
+} // namespace
namespace llvm {
extern const MCInstrDesc MipsInsts[];
@@ -436,8 +581,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
// reference or immediate we may have to expand instructions.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
const MCOperandInfo &OpInfo = MCID.OpInfo[i];
- if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY)
- || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
+ if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
+ (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
if (Op.isImm()) {
int MemOffset = Op.getImm();
@@ -450,7 +595,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
const MCExpr *Expr = Op.getExpr();
if (Expr->getKind() == MCExpr::SymbolRef) {
const MCSymbolRefExpr *SR =
- static_cast<const MCSymbolRefExpr*>(Expr);
+ static_cast<const MCSymbolRefExpr *>(Expr);
if (SR->getKind() == MCSymbolRefExpr::VK_None) {
// Expand symbol.
expandMemInst(Inst, IDLoc, Instructions, MCID.mayLoad(), false);
@@ -463,7 +608,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
}
}
} // for
- } // if load/store
+ } // if load/store
if (needsExpansion(Inst))
expandInstruction(Inst, IDLoc, Instructions);
@@ -486,7 +631,7 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
}
void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+ SmallVectorImpl<MCInst> &Instructions) {
switch (Inst.getOpcode()) {
case Mips::LoadImm32Reg:
return expandLoadImm(Inst, IDLoc, Instructions);
@@ -541,8 +686,9 @@ void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc,
}
}
-void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+void
+MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
const MCOperand &ImmOp = Inst.getOperand(2);
assert(ImmOp.isImm() && "expected immediate operand kind");
@@ -583,8 +729,9 @@ void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc,
}
}
-void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions) {
+void
+MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
MCInst tmpInst;
const MCOperand &ImmOp = Inst.getOperand(1);
assert(ImmOp.isImm() && "expected immediate operand kind");
@@ -617,14 +764,15 @@ void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc,
}
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions, bool isLoad, bool isImmOpnd) {
+ SmallVectorImpl<MCInst> &Instructions,
+ bool isLoad, bool isImmOpnd) {
const MCSymbolRefExpr *SR;
MCInst TempInst;
unsigned ImmOffset, HiOffset, LoOffset;
const MCExpr *ExprOffset;
unsigned TmpRegNum;
- unsigned AtRegNum = getReg((isMips64()) ? Mips::CPU64RegsRegClassID
- : Mips::CPURegsRegClassID, getATReg());
+ unsigned AtRegNum = getReg(
+ (isMips64()) ? Mips::GPR64RegClassID : Mips::GPR32RegClassID, getATReg());
// 1st operand is either the source or destination register.
assert(Inst.getOperand(0).isReg() && "expected register operand kind");
unsigned RegOpNum = Inst.getOperand(0).getReg();
@@ -654,7 +802,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.addOperand(MCOperand::CreateImm(HiOffset));
else {
if (ExprOffset->getKind() == MCExpr::SymbolRef) {
- SR = static_cast<const MCSymbolRefExpr*>(ExprOffset);
+ SR = static_cast<const MCSymbolRefExpr *>(ExprOffset);
const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::Create(
SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_HI,
getContext());
@@ -697,15 +845,14 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.clear();
}
-bool MipsAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool MipsAsmParser::MatchAndEmitInstruction(
+ SMLoc IDLoc, unsigned &Opcode,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo, bool MatchingInlineAsm) {
MCInst Inst;
SmallVector<MCInst, 8> Instructions;
- unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
- MatchingInlineAsm);
+ unsigned MatchResult =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
switch (MatchResult) {
default:
@@ -726,7 +873,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((MipsOperand*) Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((MipsOperand *)Operands[ErrorInfo])->getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -740,44 +887,44 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
int MipsAsmParser::matchCPURegisterName(StringRef Name) {
- int CC;
+ int CC;
if (Name == "at")
return getATReg();
- CC = StringSwitch<unsigned>(Name)
- .Case("zero", 0)
- .Case("a0", 4)
- .Case("a1", 5)
- .Case("a2", 6)
- .Case("a3", 7)
- .Case("v0", 2)
- .Case("v1", 3)
- .Case("s0", 16)
- .Case("s1", 17)
- .Case("s2", 18)
- .Case("s3", 19)
- .Case("s4", 20)
- .Case("s5", 21)
- .Case("s6", 22)
- .Case("s7", 23)
- .Case("k0", 26)
- .Case("k1", 27)
- .Case("sp", 29)
- .Case("fp", 30)
- .Case("gp", 28)
- .Case("ra", 31)
- .Case("t0", 8)
- .Case("t1", 9)
- .Case("t2", 10)
- .Case("t3", 11)
- .Case("t4", 12)
- .Case("t5", 13)
- .Case("t6", 14)
- .Case("t7", 15)
- .Case("t8", 24)
- .Case("t9", 25)
- .Default(-1);
+ CC = StringSwitch<unsigned>(Name)
+ .Case("zero", 0)
+ .Case("a0", 4)
+ .Case("a1", 5)
+ .Case("a2", 6)
+ .Case("a3", 7)
+ .Case("v0", 2)
+ .Case("v1", 3)
+ .Case("s0", 16)
+ .Case("s1", 17)
+ .Case("s2", 18)
+ .Case("s3", 19)
+ .Case("s4", 20)
+ .Case("s5", 21)
+ .Case("s6", 22)
+ .Case("s7", 23)
+ .Case("k0", 26)
+ .Case("k1", 27)
+ .Case("sp", 29)
+ .Case("fp", 30)
+ .Case("gp", 28)
+ .Case("ra", 31)
+ .Case("t0", 8)
+ .Case("t1", 9)
+ .Case("t2", 10)
+ .Case("t3", 11)
+ .Case("t4", 12)
+ .Case("t5", 13)
+ .Case("t6", 14)
+ .Case("t7", 15)
+ .Case("t8", 24)
+ .Case("t9", 25)
+ .Default(-1);
// Although SGI documentation just cuts out t0-t3 for n32/n64,
// GNU pushes the values of t0-t3 to override the o32/o64 values for t4-t7
@@ -787,83 +934,140 @@ int MipsAsmParser::matchCPURegisterName(StringRef Name) {
if (CC == -1 && isMips64())
CC = StringSwitch<unsigned>(Name)
- .Case("a4", 8)
- .Case("a5", 9)
- .Case("a6", 10)
- .Case("a7", 11)
- .Case("kt0", 26)
- .Case("kt1", 27)
- .Case("s8", 30)
- .Default(-1);
+ .Case("a4", 8)
+ .Case("a5", 9)
+ .Case("a6", 10)
+ .Case("a7", 11)
+ .Case("kt0", 26)
+ .Case("kt1", 27)
+ .Case("s8", 30)
+ .Default(-1);
return CC;
}
-int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) {
-
- if (Name.equals("fcc0"))
- return Mips::FCC0;
-
- int CC;
- CC = matchCPURegisterName(Name);
- if (CC != -1)
- return matchRegisterByNumber(CC, is64BitReg ? Mips::CPU64RegsRegClassID
- : Mips::CPURegsRegClassID);
+int MipsAsmParser::matchFPURegisterName(StringRef Name) {
if (Name[0] == 'f') {
StringRef NumString = Name.substr(1);
unsigned IntVal;
if (NumString.getAsInteger(10, IntVal))
- return -1; // This is not an integer.
- if (IntVal > 31)
+ return -1; // This is not an integer.
+ if (IntVal > 31) // Maximum index for fpu register.
return -1;
+ return IntVal;
+ }
+ return -1;
+}
- FpFormatTy Format = getFpFormat();
+int MipsAsmParser::matchFCCRegisterName(StringRef Name) {
- if (Format == FP_FORMAT_S || Format == FP_FORMAT_W)
- return getReg(Mips::FGR32RegClassID, IntVal);
- if (Format == FP_FORMAT_D) {
- if (isFP64()) {
- return getReg(Mips::FGR64RegClassID, IntVal);
- }
- // Only even numbers available as register pairs.
- if ((IntVal > 31) || (IntVal % 2 != 0))
- return -1;
- return getReg(Mips::AFGR64RegClassID, IntVal / 2);
- }
+ if (Name.startswith("fcc")) {
+ StringRef NumString = Name.substr(3);
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return -1; // This is not an integer.
+ if (IntVal > 7) // There are only 8 fcc registers.
+ return -1;
+ return IntVal;
}
+ return -1;
+}
+int MipsAsmParser::matchACRegisterName(StringRef Name) {
+
+ if (Name.startswith("ac")) {
+ StringRef NumString = Name.substr(2);
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return -1; // This is not an integer.
+ if (IntVal > 3) // There are only 3 acc registers.
+ return -1;
+ return IntVal;
+ }
return -1;
}
-void MipsAsmParser::setDefaultFpFormat() {
+int MipsAsmParser::matchMSA128RegisterName(StringRef Name) {
+ unsigned IntVal;
- if (isMips64() || isFP64())
- FpFormat = FP_FORMAT_D;
- else
- FpFormat = FP_FORMAT_S;
+ if (Name.front() != 'w' || Name.drop_front(1).getAsInteger(10, IntVal))
+ return -1;
+
+ if (IntVal > 31)
+ return -1;
+
+ return IntVal;
}
-bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){
+int MipsAsmParser::matchMSA128CtrlRegisterName(StringRef Name) {
+ int CC;
- bool IsDouble = StringSwitch<bool>(Mnemonic.lower())
- .Case("ldxc1", true)
- .Case("ldc1", true)
- .Case("sdxc1", true)
- .Case("sdc1", true)
- .Default(false);
+ CC = StringSwitch<unsigned>(Name)
+ .Case("msair", 0)
+ .Case("msacsr", 1)
+ .Case("msaaccess", 2)
+ .Case("msasave", 3)
+ .Case("msamodify", 4)
+ .Case("msarequest", 5)
+ .Case("msamap", 6)
+ .Case("msaunmap", 7)
+ .Default(-1);
- return IsDouble;
+ return CC;
}
-void MipsAsmParser::setFpFormat(StringRef Format) {
+int MipsAsmParser::matchRegisterName(StringRef Name, bool is64BitReg) {
- FpFormat = StringSwitch<FpFormatTy>(Format.lower())
- .Case(".s", FP_FORMAT_S)
- .Case(".d", FP_FORMAT_D)
- .Case(".l", FP_FORMAT_L)
- .Case(".w", FP_FORMAT_W)
- .Default(FP_FORMAT_NONE);
+ int CC;
+ CC = matchCPURegisterName(Name);
+ if (CC != -1)
+ return matchRegisterByNumber(CC, is64BitReg ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
+ CC = matchFPURegisterName(Name);
+ // TODO: decide about fpu register class
+ if (CC != -1)
+ return matchRegisterByNumber(CC, isFP64() ? Mips::FGR64RegClassID
+ : Mips::FGR32RegClassID);
+ return matchMSA128RegisterName(Name);
+}
+
+int MipsAsmParser::regKindToRegClass(int RegKind) {
+
+ switch (RegKind) {
+ case MipsOperand::Kind_GPR32:
+ return Mips::GPR32RegClassID;
+ case MipsOperand::Kind_GPR64:
+ return Mips::GPR64RegClassID;
+ case MipsOperand::Kind_HWRegs:
+ return Mips::HWRegsRegClassID;
+ case MipsOperand::Kind_FGR32Regs:
+ return Mips::FGR32RegClassID;
+ case MipsOperand::Kind_FGRH32Regs:
+ return Mips::FGRH32RegClassID;
+ case MipsOperand::Kind_FGR64Regs:
+ return Mips::FGR64RegClassID;
+ case MipsOperand::Kind_AFGR64Regs:
+ return Mips::AFGR64RegClassID;
+ case MipsOperand::Kind_CCRRegs:
+ return Mips::CCRRegClassID;
+ case MipsOperand::Kind_ACC64DSP:
+ return Mips::ACC64DSPRegClassID;
+ case MipsOperand::Kind_FCCRegs:
+ return Mips::FCCRegClassID;
+ case MipsOperand::Kind_MSA128BRegs:
+ return Mips::MSA128BRegClassID;
+ case MipsOperand::Kind_MSA128HRegs:
+ return Mips::MSA128HRegClassID;
+ case MipsOperand::Kind_MSA128WRegs:
+ return Mips::MSA128WRegClassID;
+ case MipsOperand::Kind_MSA128DRegs:
+ return Mips::MSA128DRegClassID;
+ case MipsOperand::Kind_MSA128CtrlRegs:
+ return Mips::MSACtrlRegClassID;
+ default:
+ return -1;
+ }
}
bool MipsAssemblerOptions::setATReg(unsigned Reg) {
@@ -874,17 +1078,15 @@ bool MipsAssemblerOptions::setATReg(unsigned Reg) {
return true;
}
-int MipsAsmParser::getATReg() {
- return Options.getATRegNum();
-}
+int MipsAsmParser::getATReg() { return Options.getATRegNum(); }
unsigned MipsAsmParser::getReg(int RC, int RegNo) {
- return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo);
+ return *(getContext().getRegisterInfo()->getRegClass(RC).begin() + RegNo);
}
int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, unsigned RegClass) {
-
- if (RegNum > 31)
+ if (RegNum >
+ getContext().getRegisterInfo()->getRegClass(RegClass).getNumRegs())
return -1;
return getReg(RegClass, RegNum);
@@ -899,12 +1101,13 @@ int MipsAsmParser::tryParseRegister(bool is64BitReg) {
RegNum = matchRegisterName(lowerCase, is64BitReg);
} else if (Tok.is(AsmToken::Integer))
RegNum = matchRegisterByNumber(static_cast<unsigned>(Tok.getIntVal()),
- is64BitReg ? Mips::CPU64RegsRegClassID : Mips::CPURegsRegClassID);
+ is64BitReg ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
return RegNum;
}
bool MipsAsmParser::tryParseRegisterOperand(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands, bool is64BitReg) {
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, bool is64BitReg) {
SMLoc S = Parser.getTok().getLoc();
int RegNo = -1;
@@ -913,14 +1116,15 @@ bool MipsAsmParser::tryParseRegisterOperand(
if (RegNo == -1)
return true;
- Operands.push_back(MipsOperand::CreateReg(RegNo, S,
- Parser.getTok().getLoc()));
+ Operands.push_back(
+ MipsOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
Parser.Lex(); // Eat register token.
return false;
}
-bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
- StringRef Mnemonic) {
+bool
+MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ StringRef Mnemonic) {
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -968,22 +1172,39 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
return true;
SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
-
MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
-
// Otherwise create a symbol reference.
- const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
- getContext());
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
Operands.push_back(MipsOperand::CreateImm(Res, S, E));
return false;
}
case AsmToken::Identifier:
+ // For instruction aliases like "bc1f $Label" dedicated parser will
+ // eat the '$' sign before failing. So in order to look for appropriate
+ // label we must check first if we have already consumed '$'.
+ if (hasConsumedDollar) {
+ hasConsumedDollar = false;
+ SMLoc S = Parser.getTok().getLoc();
+ StringRef Identifier;
+ if (Parser.parseIdentifier(Identifier))
+ return true;
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier);
+ // Create a symbol reference.
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, getContext());
+
+ Operands.push_back(MipsOperand::CreateImm(Res, S, E));
+ return false;
+ }
// Look for the existing symbol, we should check if
// we need to assigne the propper RegisterKind.
if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
return false;
- // Else drop to expression parsing.
+ // Else drop to expression parsing.
case AsmToken::LParen:
case AsmToken::Minus:
case AsmToken::Plus:
@@ -1014,7 +1235,7 @@ bool MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*>&Operands,
return true;
}
-const MCExpr* MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
+const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
StringRef RelocStr) {
const MCExpr *Res;
// Check the type of the expression.
@@ -1084,7 +1305,7 @@ bool MipsAsmParser::isEvaluated(const MCExpr *Expr) {
}
bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) {
- Parser.Lex(); // Eat the % token.
+ Parser.Lex(); // Eat the % token.
const AsmToken &Tok = Parser.getTok(); // Get next token, operation.
if (Tok.isNot(AsmToken::Identifier))
return true;
@@ -1130,7 +1351,7 @@ bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
StartLoc = Parser.getTok().getLoc();
RegNo = tryParseRegister(isMips64());
EndLoc = Parser.getTok().getLoc();
- return (RegNo == (unsigned) -1);
+ return (RegNo == (unsigned)-1);
}
bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
@@ -1162,11 +1383,12 @@ bool MipsAsmParser::parseMemOffset(const MCExpr *&Res, bool isParenExpr) {
}
MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
- SmallVectorImpl<MCParsedAsmOperand*>&Operands) {
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
const MCExpr *IdVal = 0;
SMLoc S;
bool isParenExpr = false;
+ MipsAsmParser::OperandMatchResultTy Res = MatchOperand_NoMatch;
// First operand is the offset.
S = Parser.getTok().getLoc();
@@ -1181,21 +1403,20 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
const AsmToken &Tok = Parser.getTok(); // Get the next token.
if (Tok.isNot(AsmToken::LParen)) {
- MipsOperand *Mnemonic = static_cast<MipsOperand*>(Operands[0]);
+ MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
if (Mnemonic->getToken() == "la") {
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(MipsOperand::CreateImm(IdVal, S, E));
return MatchOperand_Success;
}
if (Tok.is(AsmToken::EndOfStatement)) {
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
+ SMLoc E =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
// Zero register assumed, add a memory operand with ZERO as its base.
- Operands.push_back(MipsOperand::CreateMem(isMips64() ? Mips::ZERO_64
- : Mips::ZERO,
- IdVal, S, E));
+ Operands.push_back(MipsOperand::CreateMem(
+ isMips64() ? Mips::ZERO_64 : Mips::ZERO, IdVal, S, E));
return MatchOperand_Success;
}
Error(Parser.getTok().getLoc(), "'(' expected");
@@ -1205,21 +1426,12 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
Parser.Lex(); // Eat the '(' token.
}
- const AsmToken &Tok1 = Parser.getTok(); // Get next token
- if (Tok1.is(AsmToken::Dollar)) {
- Parser.Lex(); // Eat the '$' token.
- if (tryParseRegisterOperand(Operands, isMips64())) {
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- }
-
- } else {
- Error(Parser.getTok().getLoc(), "unexpected token in operand");
- return MatchOperand_ParseFail;
- }
+ Res = parseRegs(Operands, isMips64() ? (int)MipsOperand::Kind_GPR64
+ : (int)MipsOperand::Kind_GPR32);
+ if (Res != MatchOperand_Success)
+ return Res;
- const AsmToken &Tok2 = Parser.getTok(); // Get next token.
- if (Tok2.isNot(AsmToken::RParen)) {
+ if (Parser.getTok().isNot(AsmToken::RParen)) {
Error(Parser.getTok().getLoc(), "')' expected");
return MatchOperand_ParseFail;
}
@@ -1232,7 +1444,7 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
IdVal = MCConstantExpr::Create(0, getContext());
// Replace the register operand with the memory operand.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
+ MipsOperand *op = static_cast<MipsOperand *>(Operands.back());
int RegNo = op->getReg();
// Remove the register from the operands.
Operands.pop_back();
@@ -1251,336 +1463,694 @@ MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand(
return MatchOperand_Success;
}
+bool MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ // If the first token is not '$' we have an error.
+ if (Parser.getTok().isNot(AsmToken::Dollar))
+ return false;
+
+ SMLoc S = Parser.getTok().getLoc();
+ Parser.Lex();
+ AsmToken::TokenKind TkKind = getLexer().getKind();
+ int Reg;
+
+ if (TkKind == AsmToken::Integer) {
+ Reg = matchRegisterByNumber(Parser.getTok().getIntVal(),
+ regKindToRegClass(RegKind));
+ if (Reg == -1)
+ return false;
+ } else if (TkKind == AsmToken::Identifier) {
+ if ((Reg = matchCPURegisterName(Parser.getTok().getString().lower())) == -1)
+ return false;
+ Reg = getReg(regKindToRegClass(RegKind), Reg);
+ } else {
+ return false;
+ }
+
+ MipsOperand *Op = MipsOperand::CreatePtrReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind((MipsOperand::RegisterKind)RegKind);
+ Operands.push_back(Op);
+ Parser.Lex();
+ return true;
+}
+
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCPU64Regs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parsePtrReg(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ MipsOperand::RegisterKind RegKind =
+ isN64() ? MipsOperand::Kind_GPR64 : MipsOperand::Kind_GPR32;
- if (!isMips64())
+ // Parse index register.
+ if (!parsePtrReg(Operands, RegKind))
return MatchOperand_NoMatch;
- if (getLexer().getKind() == AsmToken::Identifier) {
- if (searchSymbolAlias(Operands, MipsOperand::Kind_CPU64Regs))
+
+ // Parse '('.
+ if (Parser.getTok().isNot(AsmToken::LParen))
+ return MatchOperand_NoMatch;
+
+ Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
+ Parser.Lex();
+
+ // Parse base register.
+ if (!parsePtrReg(Operands, RegKind))
+ return MatchOperand_NoMatch;
+
+ // Parse ')'.
+ if (Parser.getTok().isNot(AsmToken::RParen))
+ return MatchOperand_NoMatch;
+
+ Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
+ Parser.Lex();
+
+ return MatchOperand_Success;
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ if (getLexer().getKind() == AsmToken::Identifier && !hasConsumedDollar) {
+ if (searchSymbolAlias(Operands, Kind))
return MatchOperand_Success;
return MatchOperand_NoMatch;
}
+ SMLoc S = Parser.getTok().getLoc();
// If the first token is not '$', we have an error.
- if (Parser.getTok().isNot(AsmToken::Dollar))
+ if (Parser.getTok().isNot(AsmToken::Dollar) && !hasConsumedDollar)
return MatchOperand_NoMatch;
-
- Parser.Lex(); // Eat $
- if (!tryParseRegisterOperand(Operands, true)) {
- // Set the proper register kind.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
- op->setRegKind(MipsOperand::Kind_CPU64Regs);
+ if (!hasConsumedDollar) {
+ Parser.Lex(); // Eat the '$'
+ hasConsumedDollar = true;
+ }
+ if (getLexer().getKind() == AsmToken::Identifier) {
+ int RegNum = -1;
+ std::string RegName = Parser.getTok().getString().lower();
+ // Match register by name
+ switch (RegKind) {
+ case MipsOperand::Kind_GPR32:
+ case MipsOperand::Kind_GPR64:
+ RegNum = matchCPURegisterName(RegName);
+ break;
+ case MipsOperand::Kind_AFGR64Regs:
+ case MipsOperand::Kind_FGR64Regs:
+ case MipsOperand::Kind_FGR32Regs:
+ case MipsOperand::Kind_FGRH32Regs:
+ RegNum = matchFPURegisterName(RegName);
+ if (RegKind == MipsOperand::Kind_AFGR64Regs)
+ RegNum /= 2;
+ else if (RegKind == MipsOperand::Kind_FGRH32Regs && !isFP64())
+ if (RegNum != -1 && RegNum % 2 != 0)
+ Warning(S, "Float register should be even.");
+ break;
+ case MipsOperand::Kind_FCCRegs:
+ RegNum = matchFCCRegisterName(RegName);
+ break;
+ case MipsOperand::Kind_ACC64DSP:
+ RegNum = matchACRegisterName(RegName);
+ break;
+ default:
+ break; // No match, value is set to -1.
+ }
+ // No match found, return _NoMatch to give a chance to other round.
+ if (RegNum < 0)
+ return MatchOperand_NoMatch;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_NoMatch;
+
+ MipsOperand *Op =
+ MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+ hasConsumedDollar = false;
+ Parser.Lex(); // Eat the register name.
+ return MatchOperand_Success;
+ } else if (getLexer().getKind() == AsmToken::Integer) {
+ unsigned RegNum = Parser.getTok().getIntVal();
+ if (Kind == MipsOperand::Kind_HWRegs) {
+ if (RegNum != 29)
+ return MatchOperand_NoMatch;
+ // Only hwreg 29 is supported, found at index 0.
+ RegNum = 0;
+ }
+ int Reg = matchRegisterByNumber(RegNum, regKindToRegClass(Kind));
+ if (Reg == -1)
+ return MatchOperand_NoMatch;
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+ hasConsumedDollar = false;
+ Parser.Lex(); // Eat the register number.
+ if ((RegKind == MipsOperand::Kind_GPR32) &&
+ (getLexer().is(AsmToken::LParen))) {
+ // Check if it is indexed addressing operand.
+ Operands.push_back(MipsOperand::CreateToken("(", getLexer().getLoc()));
+ Parser.Lex(); // Eat the parenthesis.
+ if (parseRegs(Operands, RegKind) != MatchOperand_Success)
+ return MatchOperand_NoMatch;
+ if (getLexer().isNot(AsmToken::RParen))
+ return MatchOperand_NoMatch;
+ Operands.push_back(MipsOperand::CreateToken(")", getLexer().getLoc()));
+ Parser.Lex();
+ }
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
}
-bool MipsAsmParser::searchSymbolAlias(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands, unsigned RegisterKind) {
+bool MipsAsmParser::validateMSAIndex(int Val, int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
- if (Sym) {
- SMLoc S = Parser.getTok().getLoc();
- const MCExpr *Expr;
- if (Sym->isVariable())
- Expr = Sym->getVariableValue();
- else
- return false;
- if (Expr->getKind() == MCExpr::SymbolRef) {
- const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
- const StringRef DefSymbol = Ref->getSymbol().getName();
- if (DefSymbol.startswith("$")) {
- // Lookup for the register with the corresponding name.
- int RegNum = matchRegisterName(DefSymbol.substr(1), isMips64());
- if (RegNum > -1) {
- Parser.Lex();
- MipsOperand *op = MipsOperand::CreateReg(RegNum, S,
- Parser.getTok().getLoc());
- op->setRegKind((MipsOperand::RegisterKind) RegisterKind);
- Operands.push_back(op);
- return true;
- }
- }
- } else if (Expr->getKind() == MCExpr::Constant) {
- Parser.Lex();
- const MCConstantExpr *Const = static_cast<const MCConstantExpr*>(Expr);
- MipsOperand *op = MipsOperand::CreateImm(Const, S,
- Parser.getTok().getLoc());
- Operands.push_back(op);
- return true;
- }
+ if (Val < 0)
+ return false;
+
+ switch (Kind) {
+ default:
+ return false;
+ case MipsOperand::Kind_MSA128BRegs:
+ return Val < 16;
+ case MipsOperand::Kind_MSA128HRegs:
+ return Val < 8;
+ case MipsOperand::Kind_MSA128WRegs:
+ return Val < 4;
+ case MipsOperand::Kind_MSA128DRegs:
+ return Val < 2;
}
- return false;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCPURegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseMSARegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ SMLoc S = Parser.getTok().getLoc();
+ std::string RegName;
- if (getLexer().getKind() == AsmToken::Identifier) {
- if (searchSymbolAlias(Operands, MipsOperand::Kind_CPURegs))
- return MatchOperand_Success;
- return MatchOperand_NoMatch;
- }
- // If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
- Parser.Lex(); // Eat $
- if (!tryParseRegisterOperand(Operands, false)) {
- // Set the proper register kind.
- MipsOperand* op = static_cast<MipsOperand*>(Operands.back());
- op->setRegKind(MipsOperand::Kind_CPURegs);
+ switch (RegKind) {
+ default:
+ return MatchOperand_ParseFail;
+ case MipsOperand::Kind_MSA128BRegs:
+ case MipsOperand::Kind_MSA128HRegs:
+ case MipsOperand::Kind_MSA128WRegs:
+ case MipsOperand::Kind_MSA128DRegs:
+ break;
+ }
+
+ Parser.Lex(); // Eat the '$'.
+ if (getLexer().getKind() == AsmToken::Identifier)
+ RegName = Parser.getTok().getString().lower();
+ else
+ return MatchOperand_ParseFail;
+
+ int RegNum = matchMSA128RegisterName(RegName);
+
+ if (RegNum < 0 || RegNum > 31)
+ return MatchOperand_ParseFail;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_ParseFail;
+
+ MipsOperand *Op = MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ Op->setRegKind(Kind);
+ Operands.push_back(Op);
+
+ Parser.Lex(); // Eat the register identifier.
+
+ // MSA registers may be suffixed with an index in the form of:
+ // 1) Immediate expression.
+ // 2) General Purpose Register.
+ // Examples:
+ // 1) copy_s.b $29,$w0[0]
+ // 2) sld.b $w0,$w1[$1]
+
+ if (Parser.getTok().isNot(AsmToken::LBrac))
+ return MatchOperand_Success;
+
+ MipsOperand *Mnemonic = static_cast<MipsOperand *>(Operands[0]);
+
+ Operands.push_back(MipsOperand::CreateToken("[", Parser.getTok().getLoc()));
+ Parser.Lex(); // Parse the '[' token.
+
+ if (Parser.getTok().is(AsmToken::Dollar)) {
+ // This must be a GPR.
+ MipsOperand *RegOp;
+ SMLoc VIdx = Parser.getTok().getLoc();
+ Parser.Lex(); // Parse the '$' token.
+
+ // GPR have aliases and we must account for that. Example: $30 == $fp
+ if (getLexer().getKind() == AsmToken::Integer) {
+ unsigned RegNum = Parser.getTok().getIntVal();
+ int Reg = matchRegisterByNumber(
+ RegNum, regKindToRegClass(MipsOperand::Kind_GPR32));
+ if (Reg == -1) {
+ Error(VIdx, "invalid general purpose register");
+ return MatchOperand_ParseFail;
+ }
+
+ RegOp = MipsOperand::CreateReg(Reg, VIdx, Parser.getTok().getLoc());
+ } else if (getLexer().getKind() == AsmToken::Identifier) {
+ int RegNum = -1;
+ std::string RegName = Parser.getTok().getString().lower();
+
+ RegNum = matchCPURegisterName(RegName);
+ if (RegNum == -1) {
+ Error(VIdx, "general purpose register expected");
+ return MatchOperand_ParseFail;
+ }
+ RegNum = getReg(regKindToRegClass(MipsOperand::Kind_GPR32), RegNum);
+ RegOp = MipsOperand::CreateReg(RegNum, VIdx, Parser.getTok().getLoc());
+ } else
+ return MatchOperand_ParseFail;
+
+ RegOp->setRegKind(MipsOperand::Kind_GPR32);
+ Operands.push_back(RegOp);
+ Parser.Lex(); // Eat the register identifier.
+
+ if (Parser.getTok().isNot(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
+ Parser.Lex(); // Parse the ']' token.
+
return MatchOperand_Success;
}
- return MatchOperand_NoMatch;
+
+ // The index must be a constant expression then.
+ SMLoc VIdx = Parser.getTok().getLoc();
+ const MCExpr *ImmVal;
+
+ if (getParser().parseExpression(ImmVal))
+ return MatchOperand_ParseFail;
+
+ const MCConstantExpr *expr = dyn_cast<MCConstantExpr>(ImmVal);
+ if (!expr || !validateMSAIndex((int)expr->getValue(), Kind)) {
+ Error(VIdx, "invalid immediate value");
+ return MatchOperand_ParseFail;
+ }
+
+ SMLoc E = Parser.getTok().getEndLoc();
+
+ if (Parser.getTok().isNot(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ bool insve =
+ Mnemonic->getToken() == "insve.b" || Mnemonic->getToken() == "insve.h" ||
+ Mnemonic->getToken() == "insve.w" || Mnemonic->getToken() == "insve.d";
+
+ // The second vector index of insve instructions is always 0.
+ if (insve && Operands.size() > 6) {
+ if (expr->getValue() != 0) {
+ Error(VIdx, "immediate value must be 0");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(MipsOperand::CreateToken("0", VIdx));
+ } else
+ Operands.push_back(MipsOperand::CreateImm(expr, VIdx, E));
+
+ Operands.push_back(MipsOperand::CreateToken("]", Parser.getTok().getLoc()));
+
+ Parser.Lex(); // Parse the ']' token.
+
+ return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseMSACtrlRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
+ int RegKind) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
- if (isMips64())
+ if (Kind != MipsOperand::Kind_MSA128CtrlRegs)
return MatchOperand_NoMatch;
- // If the first token is not '$' we have error.
if (Parser.getTok().isNot(AsmToken::Dollar))
- return MatchOperand_NoMatch;
+ return MatchOperand_ParseFail;
+
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '$'.
- const AsmToken &Tok = Parser.getTok(); // Get the next token.
- if (Tok.isNot(AsmToken::Integer))
- return MatchOperand_NoMatch;
+ Parser.Lex(); // Eat the '$' symbol.
- unsigned RegNum = Tok.getIntVal();
- // At the moment only hwreg29 is supported.
- if (RegNum != 29)
+ int RegNum = -1;
+ if (getLexer().getKind() == AsmToken::Identifier)
+ RegNum = matchMSA128CtrlRegisterName(Parser.getTok().getString().lower());
+ else if (getLexer().getKind() == AsmToken::Integer)
+ RegNum = Parser.getTok().getIntVal();
+ else
return MatchOperand_ParseFail;
- MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_HWRegs);
- Operands.push_back(op);
+ if (RegNum < 0 || RegNum > 7)
+ return MatchOperand_ParseFail;
+
+ int RegVal = getReg(regKindToRegClass(Kind), RegNum);
+ if (RegVal == -1)
+ return MatchOperand_ParseFail;
+
+ MipsOperand *RegOp =
+ MipsOperand::CreateReg(RegVal, S, Parser.getTok().getLoc());
+ RegOp->setRegKind(MipsOperand::Kind_MSA128CtrlRegs);
+ Operands.push_back(RegOp);
+ Parser.Lex(); // Eat the register identifier.
- Parser.Lex(); // Eat the register number.
return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseHW64Regs(
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+MipsAsmParser::parseGPR64(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
if (!isMips64())
return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_GPR64);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseGPR32(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_GPR32);
+}
+
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseAFGR64Regs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+
+ if (isFP64())
+ return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_AFGR64Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFGR64Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ if (!isFP64())
+ return MatchOperand_NoMatch;
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGR64Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFGR32Regs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGR32Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseFGRH32Regs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FGRH32Regs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseFCCRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_FCCRegs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseACC64DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_ACC64DSP);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseLO32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
// If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
+
SMLoc S = Parser.getTok().getLoc();
- Parser.Lex(); // Eat $
+ Parser.Lex(); // Eat the '$'
- const AsmToken &Tok = Parser.getTok(); // Get the next token.
- if (Tok.isNot(AsmToken::Integer))
+ const AsmToken &Tok = Parser.getTok(); // Get next token.
+
+ if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- unsigned RegNum = Tok.getIntVal();
- // At the moment only hwreg29 is supported.
- if (RegNum != 29)
- return MatchOperand_ParseFail;
+ if (!Tok.getIdentifier().startswith("ac"))
+ return MatchOperand_NoMatch;
+
+ StringRef NumString = Tok.getIdentifier().substr(2);
+
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
+ return MatchOperand_NoMatch;
- MipsOperand *op = MipsOperand::CreateReg(Mips::HWR29_64, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_HW64Regs);
- Operands.push_back(op);
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::LO32DSPRegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_LO32DSP);
+ Operands.push_back(Op);
Parser.Lex(); // Eat the register number.
return MatchOperand_Success;
}
MipsAsmParser::OperandMatchResultTy
-MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- unsigned RegNum;
+MipsAsmParser::parseHI32DSP(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
// If the first token is not '$' we have an error.
if (Parser.getTok().isNot(AsmToken::Dollar))
return MatchOperand_NoMatch;
+
SMLoc S = Parser.getTok().getLoc();
Parser.Lex(); // Eat the '$'
const AsmToken &Tok = Parser.getTok(); // Get next token.
- if (Tok.is(AsmToken::Integer)) {
- RegNum = Tok.getIntVal();
- // At the moment only fcc0 is supported.
- if (RegNum != 0)
- return MatchOperand_ParseFail;
- } else if (Tok.is(AsmToken::Identifier)) {
- // At the moment only fcc0 is supported.
- if (Tok.getIdentifier() != "fcc0")
- return MatchOperand_ParseFail;
- } else
+
+ if (Tok.isNot(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
+
+ if (!Tok.getIdentifier().startswith("ac"))
+ return MatchOperand_NoMatch;
+
+ StringRef NumString = Tok.getIdentifier().substr(2);
+
+ unsigned IntVal;
+ if (NumString.getAsInteger(10, IntVal))
return MatchOperand_NoMatch;
- MipsOperand *op = MipsOperand::CreateReg(Mips::FCC0, S,
- Parser.getTok().getLoc());
- op->setRegKind(MipsOperand::Kind_CCRRegs);
- Operands.push_back(op);
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::HI32DSPRegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_HI32DSP);
+ Operands.push_back(Op);
Parser.Lex(); // Eat the register number.
return MatchOperand_Success;
}
-MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseCOP2(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ // If the first token is not '$' we have an error.
+ if (Parser.getTok().isNot(AsmToken::Dollar))
+ return MatchOperand_NoMatch;
- MCSymbolRefExpr::VariantKind VK
- = StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
- .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
- .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
- .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
- .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
- .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
- .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
- .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
- .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
- .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
- .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
- .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
- .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
- .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
- .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
- .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
- .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
- .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
- .Default(MCSymbolRefExpr::VK_None);
+ SMLoc S = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat the '$'
- return VK;
+ const AsmToken &Tok = Parser.getTok(); // Get next token.
+
+ if (Tok.isNot(AsmToken::Integer))
+ return MatchOperand_NoMatch;
+
+ unsigned IntVal = Tok.getIntVal();
+
+ unsigned Reg = matchRegisterByNumber(IntVal, Mips::COP2RegClassID);
+
+ MipsOperand *Op = MipsOperand::CreateReg(Reg, S, Parser.getTok().getLoc());
+ Op->setRegKind(MipsOperand::Kind_COP2);
+ Operands.push_back(Op);
+
+ Parser.Lex(); // Eat the register number.
+ return MatchOperand_Success;
}
-static int ConvertCcString(StringRef CondString) {
- int CC = StringSwitch<unsigned>(CondString)
- .Case(".f", 0)
- .Case(".un", 1)
- .Case(".eq", 2)
- .Case(".ueq", 3)
- .Case(".olt", 4)
- .Case(".ult", 5)
- .Case(".ole", 6)
- .Case(".ule", 7)
- .Case(".sf", 8)
- .Case(".ngle", 9)
- .Case(".seq", 10)
- .Case(".ngl", 11)
- .Case(".lt", 12)
- .Case(".nge", 13)
- .Case(".le", 14)
- .Case(".ngt", 15)
- .Default(-1);
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128BRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128BRegs);
+}
- return CC;
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128HRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128HRegs);
}
-bool MipsAsmParser::
-parseMathOperation(StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // Split the format.
- size_t Start = Name.find('.'), Next = Name.rfind('.');
- StringRef Format1 = Name.slice(Start, Next);
- // Add the first format to the operands.
- Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc));
- // Now for the second format.
- StringRef Format2 = Name.slice(Next, StringRef::npos);
- Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc));
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128WRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128WRegs);
+}
- // Set the format for the first register.
- setFpFormat(Format1);
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128DRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSARegs(Operands, (int)MipsOperand::Kind_MSA128DRegs);
+}
- // Read the remaining operands.
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- // Read the first operand.
- if (ParseOperand(Operands, Name)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
- }
+MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMSA128CtrlRegs(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseMSACtrlRegs(Operands, (int)MipsOperand::Kind_MSA128CtrlRegs);
+}
- if (getLexer().isNot(AsmToken::Comma)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
+bool MipsAsmParser::searchSymbolAlias(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands, unsigned RegKind) {
+
+ MCSymbol *Sym = getContext().LookupSymbol(Parser.getTok().getIdentifier());
+ if (Sym) {
+ SMLoc S = Parser.getTok().getLoc();
+ const MCExpr *Expr;
+ if (Sym->isVariable())
+ Expr = Sym->getVariableValue();
+ else
+ return false;
+ if (Expr->getKind() == MCExpr::SymbolRef) {
+ MipsOperand::RegisterKind Kind = (MipsOperand::RegisterKind)RegKind;
+ const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
+ const StringRef DefSymbol = Ref->getSymbol().getName();
+ if (DefSymbol.startswith("$")) {
+ int RegNum = -1;
+ APInt IntVal(32, -1);
+ if (!DefSymbol.substr(1).getAsInteger(10, IntVal))
+ RegNum = matchRegisterByNumber(IntVal.getZExtValue(),
+ isMips64() ? Mips::GPR64RegClassID
+ : Mips::GPR32RegClassID);
+ else {
+          // Look up the register with the corresponding name.
+ switch (Kind) {
+ case MipsOperand::Kind_AFGR64Regs:
+ case MipsOperand::Kind_FGR64Regs:
+ RegNum = matchFPURegisterName(DefSymbol.substr(1));
+ break;
+ case MipsOperand::Kind_FGR32Regs:
+ RegNum = matchFPURegisterName(DefSymbol.substr(1));
+ break;
+ case MipsOperand::Kind_GPR64:
+ case MipsOperand::Kind_GPR32:
+ default:
+ RegNum = matchCPURegisterName(DefSymbol.substr(1));
+ break;
+ }
+ if (RegNum > -1)
+ RegNum = getReg(regKindToRegClass(Kind), RegNum);
+ }
+ if (RegNum > -1) {
+ Parser.Lex();
+ MipsOperand *op =
+ MipsOperand::CreateReg(RegNum, S, Parser.getTok().getLoc());
+ op->setRegKind(Kind);
+ Operands.push_back(op);
+ return true;
+ }
+ }
+ } else if (Expr->getKind() == MCExpr::Constant) {
+ Parser.Lex();
+ const MCConstantExpr *Const = static_cast<const MCConstantExpr *>(Expr);
+ MipsOperand *op =
+ MipsOperand::CreateImm(Const, S, Parser.getTok().getLoc());
+ Operands.push_back(op);
+ return true;
}
- Parser.Lex(); // Eat the comma.
+ }
+ return false;
+}
- // Set the format for the first register
- setFpFormat(Format2);
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseHWRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_HWRegs);
+}
- // Parse and remember the operand.
- if (ParseOperand(Operands, Name)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
- }
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseCCRRegs(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ return parseRegs(Operands, (int)MipsOperand::Kind_CCRRegs);
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseInvNum(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ const MCExpr *IdVal;
+  // If the first token is '$' we may have a register operand.
+ if (Parser.getTok().is(AsmToken::Dollar))
+ return MatchOperand_NoMatch;
+ SMLoc S = Parser.getTok().getLoc();
+ if (getParser().parseExpression(IdVal))
+ return MatchOperand_ParseFail;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(IdVal);
+ assert(MCE && "Unexpected MCExpr type.");
+ int64_t Val = MCE->getValue();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(MipsOperand::CreateImm(
+ MCConstantExpr::Create(0 - Val, getContext()), S, E));
+ return MatchOperand_Success;
+}
+
+MipsAsmParser::OperandMatchResultTy
+MipsAsmParser::parseLSAImm(SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ switch (getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Plus:
+ case AsmToken::Minus:
+ case AsmToken::Integer:
+ break;
}
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- SMLoc Loc = getLexer().getLoc();
- Parser.eatToEndOfStatement();
- return Error(Loc, "unexpected token in argument list");
+ const MCExpr *Expr;
+ SMLoc S = Parser.getTok().getLoc();
+
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_ParseFail;
+
+ int64_t Val;
+ if (!Expr->EvaluateAsAbsolute(Val)) {
+ Error(S, "expected immediate value");
+ return MatchOperand_ParseFail;
}
- Parser.Lex(); // Consume the EndOfStatement.
- return false;
+ // The LSA instruction allows a 2-bit unsigned immediate. For this reason
+ // and because the CPU always adds one to the immediate field, the allowed
+ // range becomes 1..4. We'll only check the range here and will deal
+ // with the addition/subtraction when actually decoding/encoding
+ // the instruction.
+ if (Val < 1 || Val > 4) {
+ Error(S, "immediate not in range (1..4)");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(MipsOperand::CreateLSAImm(Expr, S,
+ Parser.getTok().getLoc()));
+ return MatchOperand_Success;
}
-bool MipsAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- StringRef Mnemonic;
- // Floating point instructions: Should the register be treated as a double?
- if (requestsDoubleOperand(Name)) {
- setFpFormat(FP_FORMAT_D);
- Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
- Mnemonic = Name;
- } else {
- setDefaultFpFormat();
- // Create the leading tokens for the mnemonic, split by '.' characters.
- size_t Start = 0, Next = Name.find('.');
- Mnemonic = Name.slice(Start, Next);
-
- Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc));
-
- if (Next != StringRef::npos) {
- // There is a format token in mnemonic.
- size_t Dot = Name.find('.', Next + 1);
- StringRef Format = Name.slice(Next, Dot);
- if (Dot == StringRef::npos) // Only one '.' in a string, it's a format.
- Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
- else {
- if (Name.startswith("c.")) {
- // Floating point compare, add '.' and immediate represent for cc.
- Operands.push_back(MipsOperand::CreateToken(".", NameLoc));
- int Cc = ConvertCcString(Format);
- if (Cc == -1) {
- return Error(NameLoc, "Invalid conditional code");
- }
- SMLoc E = SMLoc::getFromPointer(
- Parser.getTok().getLoc().getPointer() - 1);
- Operands.push_back(
- MipsOperand::CreateImm(MCConstantExpr::Create(Cc, getContext()),
- NameLoc, E));
- } else {
- // trunc, ceil, floor ...
- return parseMathOperation(Name, NameLoc, Operands);
- }
+MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) {
- // The rest is a format.
- Format = Name.slice(Dot, StringRef::npos);
- Operands.push_back(MipsOperand::CreateToken(Format, NameLoc));
- }
+ MCSymbolRefExpr::VariantKind VK =
+ StringSwitch<MCSymbolRefExpr::VariantKind>(Symbol)
+ .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI)
+ .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO)
+ .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL)
+ .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL)
+ .Case("got", MCSymbolRefExpr::VK_Mips_GOT)
+ .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD)
+ .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM)
+ .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI)
+ .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO)
+ .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL)
+ .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI)
+ .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO)
+ .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP)
+ .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE)
+ .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST)
+ .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI)
+ .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO)
+ .Default(MCSymbolRefExpr::VK_None);
- setFpFormat(Format);
- }
+ return VK;
+}
+
+bool MipsAsmParser::ParseInstruction(
+ ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+  // Check if we have a valid mnemonic.
+ if (!mnemonicIsValid(Name, 0)) {
+ Parser.eatToEndOfStatement();
+ return Error(NameLoc, "Unknown instruction");
}
+  // The first operand in MCInst is the instruction mnemonic.
+ Operands.push_back(MipsOperand::CreateToken(Name, NameLoc));
// Read the remaining operands.
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
- if (ParseOperand(Operands, Mnemonic)) {
+ if (ParseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
@@ -1588,7 +2158,6 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
-
// Parse and remember the operand.
if (ParseOperand(Operands, Name)) {
SMLoc Loc = getLexer().getLoc();
@@ -1597,13 +2166,11 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
}
}
}
-
if (getLexer().isNot(AsmToken::EndOfStatement)) {
SMLoc Loc = getLexer().getLoc();
Parser.eatToEndOfStatement();
return Error(Loc, "unexpected token in argument list");
}
-
Parser.Lex(); // Consume the EndOfStatement.
return false;
}
@@ -1741,8 +2308,23 @@ bool MipsAsmParser::parseSetAssignment() {
return reportParseError("unexpected token in .set directive");
Lex(); // Eat comma
- if (Parser.parseExpression(Value))
- reportParseError("expected valid expression after comma");
+ if (getLexer().is(AsmToken::Dollar)) {
+ MCSymbol *Symbol;
+ SMLoc DollarLoc = getLexer().getLoc();
+ // Consume the dollar sign, and check for a following identifier.
+ Parser.Lex();
+    // We have a '$' followed by something; make sure they are adjacent.
+ if (DollarLoc.getPointer() + 1 != getTok().getLoc().getPointer())
+ return true;
+ StringRef Res =
+ StringRef(DollarLoc.getPointer(),
+ getTok().getEndLoc().getPointer() - DollarLoc.getPointer());
+ Symbol = getContext().GetOrCreateSymbol(Res);
+ Parser.Lex();
+ Value =
+ MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None, getContext());
+ } else if (Parser.parseExpression(Value))
+ return reportParseError("expected valid expression after comma");
// Check if the Name already exists as a symbol.
MCSymbol *Sym = getContext().LookupSymbol(Name);
@@ -1788,6 +2370,34 @@ bool MipsAsmParser::parseDirectiveSet() {
return true;
}
+bool MipsAsmParser::parseDirectiveMipsHackStocg() {
+ MCAsmParser &Parser = getParser();
+ StringRef Name;
+ if (Parser.parseIdentifier(Name))
+ reportParseError("expected identifier");
+
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token");
+ Lex();
+
+ int64_t Flags = 0;
+ if (Parser.parseAbsoluteExpression(Flags))
+ return TokError("unexpected token");
+
+ getTargetStreamer().emitMipsHackSTOCG(Sym, Flags);
+ return false;
+}
+
+bool MipsAsmParser::parseDirectiveMipsHackELFFlags() {
+ int64_t Flags = 0;
+ if (Parser.parseAbsoluteExpression(Flags))
+ return TokError("unexpected token");
+
+ getTargetStreamer().emitMipsHackELFFlags(Flags);
+ return false;
+}
+
/// parseDirectiveWord
/// ::= .word [ expression (, expression)* ]
bool MipsAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
@@ -1813,6 +2423,22 @@ bool MipsAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
return false;
}
+/// parseDirectiveGpWord
+/// ::= .gpword local_sym
+bool MipsAsmParser::parseDirectiveGpWord() {
+ const MCExpr *Value;
+  // EmitGPRel32Value requires an expression, so we use the base class
+  // method to evaluate the expression.
+ if (getParser().parseExpression(Value))
+ return true;
+ getParser().getStreamer().EmitGPRel32Value(Value);
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return Error(getLexer().getLoc(), "unexpected token in directive");
+ Parser.Lex(); // Eat EndOfStatement token.
+ return false;
+}
+
bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
StringRef IDVal = DirectiveID.getString();
@@ -1853,7 +2479,7 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
if (IDVal == ".gpword") {
// Ignore this directive for now.
- Parser.eatToEndOfStatement();
+ parseDirectiveGpWord();
return false;
}
@@ -1862,6 +2488,12 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) {
return false;
}
+ if (IDVal == ".mips_hack_stocg")
+ return parseDirectiveMipsHackStocg();
+
+ if (IDVal == ".mips_hack_elf_flags")
+ return parseDirectiveMipsHackELFFlags();
+
return true;
}
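
The parseLSAImm routine added above only range-checks the shift amount (1..4) and leaves the off-by-one encoding to the encoder, while the DecodeLSAImm change further below adds the one back when disassembling. A minimal standalone sketch of that convention; the helper names and the main() driver are illustrative and not part of the patch:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Encode an LSA shift amount (valid range 1..4) into its 2-bit field.
    // Returns false when the value is out of range, mirroring the check
    // performed by parseLSAImm.
    static bool encodeLSAImm(int64_t Val, uint32_t &Field) {
      if (Val < 1 || Val > 4)
        return false;
      Field = static_cast<uint32_t>(Val - 1); // stored off by one
      return true;
    }

    // Recover the architectural shift amount from the 2-bit field, the same
    // adjustment DecodeLSAImm makes by adding one to the encoded value.
    static int64_t decodeLSAImm(uint32_t Field) {
      return static_cast<int64_t>(Field) + 1;
    }

    int main() {
      for (int64_t V = 1; V <= 4; ++V) {
        uint32_t Field = 0;
        bool OK = encodeLSAImm(V, Field);
        assert(OK && decodeLSAImm(Field) == V);
        (void)OK;
        std::printf("imm %lld -> field %u\n", static_cast<long long>(V), Field);
      }
      uint32_t Dummy = 0;
      bool Rejected = !encodeLSAImm(5, Dummy); // out of range, the parser rejects it
      assert(Rejected);
      (void)Rejected;
      (void)Dummy;
      return 0;
    }
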
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index 78a9f70c6681..6acc9a88c06e 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -15,6 +15,7 @@ add_public_tablegen_target(MipsCommonTableGen)
add_llvm_target(MipsCodeGen
Mips16FrameLowering.cpp
+ Mips16HardFloat.cpp
Mips16InstrInfo.cpp
Mips16ISelDAGToDAG.cpp
Mips16ISelLowering.cpp
@@ -46,10 +47,11 @@ add_llvm_target(MipsCodeGen
MipsSelectionDAGInfo.cpp
)
-add_dependencies(LLVMMipsCodeGen intrinsics_gen)
+add_dependencies(LLVMMipsCodeGen MipsCommonTableGen intrinsics_gen)
add_subdirectory(InstPrinter)
add_subdirectory(Disassembler)
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
add_subdirectory(AsmParser)
+
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
index 0dba33a2767a..60508a8c4fcb 100644
--- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -35,26 +35,33 @@ public:
///
MipsDisassemblerBase(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
bool bigEndian) :
- MCDisassembler(STI), RegInfo(Info), isBigEndian(bigEndian) {}
+ MCDisassembler(STI), RegInfo(Info),
+ IsN64(STI.getFeatureBits() & Mips::FeatureN64), isBigEndian(bigEndian) {}
virtual ~MipsDisassemblerBase() {}
- const MCRegisterInfo *getRegInfo() const { return RegInfo; }
+ const MCRegisterInfo *getRegInfo() const { return RegInfo.get(); }
+
+ bool isN64() const { return IsN64; }
private:
- const MCRegisterInfo *RegInfo;
+ OwningPtr<const MCRegisterInfo> RegInfo;
+ bool IsN64;
protected:
bool isBigEndian;
};
/// MipsDisassembler - a disassembler class for Mips32.
class MipsDisassembler : public MipsDisassemblerBase {
+ bool IsMicroMips;
public:
/// Constructor - Initializes the disassembler.
///
MipsDisassembler(const MCSubtargetInfo &STI, const MCRegisterInfo *Info,
bool bigEndian) :
- MipsDisassemblerBase(STI, Info, bigEndian) {}
+ MipsDisassemblerBase(STI, Info, bigEndian) {
+ IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
/// getInstruction - See MCDisassembler.
virtual DecodeStatus getInstruction(MCInst &instr,
@@ -88,25 +95,30 @@ public:
// Forward declare these because the autogenerated code will reference them.
// Definitions are further down.
-static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodePtrRegisterClass(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeDSPRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
@@ -118,11 +130,21 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -133,47 +155,88 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
- unsigned Insn,
+static DecodeStatus DecodeACC64DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeHI32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeLO32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
-static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder);
+static DecodeStatus DecodeMSA128BRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128HRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128WRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSA128DRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeBC1(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder);
-
-
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
+// DecodeBranchTargetMM - Decode microMIPS branch offset, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
+// DecodeJumpTargetMM - Decode microMIPS jump target, which is
+// shifted left by 1 bit.
+static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);
+static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder);
+
+static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMemMMImm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);
@@ -183,10 +246,12 @@ static DecodeStatus DecodeSimm16(MCInst &Inst,
uint64_t Address,
const void *Decoder);
-static DecodeStatus DecodeCondCode(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder);
+// Decode the immediate field of an LSA instruction; the encoding is
+// off by one.
+static DecodeStatus DecodeLSAImm(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
static DecodeStatus DecodeInsSize(MCInst &Inst,
unsigned Insn,
@@ -248,11 +313,12 @@ static DecodeStatus readInstruction32(const MemoryObject &region,
uint64_t address,
uint64_t &size,
uint32_t &insn,
- bool isBigEndian) {
+ bool isBigEndian,
+ bool IsMicroMips) {
uint8_t Bytes[4];
// We want to read exactly 4 Bytes of data.
- if (region.readBytes(address, 4, (uint8_t*)Bytes, NULL) == -1) {
+ if (region.readBytes(address, 4, Bytes) == -1) {
size = 0;
return MCDisassembler::Fail;
}
@@ -266,10 +332,20 @@ static DecodeStatus readInstruction32(const MemoryObject &region,
}
else {
    // Encoded as a little-endian 32-bit word in the stream.
- insn = (Bytes[0] << 0) |
- (Bytes[1] << 8) |
- (Bytes[2] << 16) |
- (Bytes[3] << 24);
+ // Little-endian byte ordering:
+ // mips32r2: 4 | 3 | 2 | 1
+ // microMIPS: 2 | 1 | 4 | 3
+ if (IsMicroMips) {
+ insn = (Bytes[2] << 0) |
+ (Bytes[3] << 8) |
+ (Bytes[0] << 16) |
+ (Bytes[1] << 24);
+ } else {
+ insn = (Bytes[0] << 0) |
+ (Bytes[1] << 8) |
+ (Bytes[2] << 16) |
+ (Bytes[3] << 24);
+ }
}
return MCDisassembler::Success;
@@ -285,10 +361,21 @@ MipsDisassembler::getInstruction(MCInst &instr,
uint32_t Insn;
DecodeStatus Result = readInstruction32(Region, Address, Size,
- Insn, isBigEndian);
+ Insn, isBigEndian, IsMicroMips);
if (Result == MCDisassembler::Fail)
return MCDisassembler::Fail;
+ if (IsMicroMips) {
+ // Calling the auto-generated decoder function.
+ Result = decodeInstruction(DecoderTableMicroMips32, instr, Insn, Address,
+ this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ return MCDisassembler::Fail;
+ }
+
// Calling the auto-generated decoder function.
Result = decodeInstruction(DecoderTableMips32, instr, Insn, Address,
this, STI);
@@ -310,7 +397,7 @@ Mips64Disassembler::getInstruction(MCInst &instr,
uint32_t Insn;
DecodeStatus Result = readInstruction32(Region, Address, Size,
- Insn, isBigEndian);
+ Insn, isBigEndian, false);
if (Result == MCDisassembler::Fail)
return MCDisassembler::Fail;
@@ -346,35 +433,45 @@ static DecodeStatus DecodeCPU16RegsRegisterClass(MCInst &Inst,
}
-static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeGPR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::CPU64RegsRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::GPR64RegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeGPR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo > 31)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::CPURegsRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::GPR32RegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- return DecodeCPURegsRegisterClass(Inst, RegNo, Address, Decoder);
+static DecodeStatus DecodePtrRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (static_cast<const MipsDisassembler *>(Decoder)->isN64())
+ return DecodeGPR64RegisterClass(Inst, RegNo, Address, Decoder);
+
+ return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder);
+}
+
+static DecodeStatus DecodeDSPRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return DecodeGPR32RegisterClass(Inst, RegNo, Address, Decoder);
}
static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
@@ -401,11 +498,37 @@ static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeFGRH32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::FGRH32RegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- Inst.addOperand(MCOperand::CreateReg(RegNo));
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::CCRRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFCCRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
+ return MCDisassembler::Fail;
+ unsigned Reg = getReg(Decoder, Mips::FCCRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -417,8 +540,8 @@ static DecodeStatus DecodeMem(MCInst &Inst,
unsigned Reg = fieldFromInstruction(Insn, 16, 5);
unsigned Base = fieldFromInstruction(Insn, 21, 5);
- Reg = getReg(Decoder, Mips::CPURegsRegClassID, Reg);
- Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
if(Inst.getOpcode() == Mips::SC){
Inst.addOperand(MCOperand::CreateReg(Reg));
@@ -431,6 +554,58 @@ static DecodeStatus DecodeMem(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
+ uint64_t Address, const void *Decoder) {
+ int Offset = SignExtend32<10>(fieldFromInstruction(Insn, 16, 10));
+ unsigned Reg = fieldFromInstruction(Insn, 6, 5);
+ unsigned Base = fieldFromInstruction(Insn, 11, 5);
+
+ Reg = getReg(Decoder, Mips::MSA128BRegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<12>(Insn & 0x0fff);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMemMMImm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ unsigned Reg = fieldFromInstruction(Insn, 21, 5);
+ unsigned Base = fieldFromInstruction(Insn, 16, 5);
+
+ Reg = getReg(Decoder, Mips::GPR32RegClassID, Reg);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ Inst.addOperand(MCOperand::CreateReg(Base));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeFMem(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -440,7 +615,7 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
unsigned Base = fieldFromInstruction(Insn, 21, 5);
Reg = getReg(Decoder, Mips::FGR64RegClassID, Reg);
- Base = getReg(Decoder, Mips::CPURegsRegClassID, Base);
+ Base = getReg(Decoder, Mips::GPR32RegClassID, Base);
Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
@@ -461,15 +636,6 @@ static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeCondCode(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- int CondCode = Insn & 0xf;
- Inst.addOperand(MCOperand::CreateImm(CondCode));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
@@ -483,49 +649,98 @@ static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
+static DecodeStatus DecodeACC64DSPRegisterClass(MCInst &Inst,
unsigned RegNo,
uint64_t Address,
const void *Decoder) {
- //Currently only hardware register 29 is supported
- if (RegNo != 29)
- return MCDisassembler::Fail;
- Inst.addOperand(MCOperand::CreateReg(Mips::HWR29_64));
+ if (RegNo >= 4)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::ACC64DSPRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeACRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeHI32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo >= 4)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::ACRegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::HI32DSPRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeHIRegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
+static DecodeStatus DecodeLO32DSPRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
if (RegNo >= 4)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::HIRegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::LO32DSPRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
-static DecodeStatus DecodeLORegsDSPRegisterClass(MCInst &Inst,
- unsigned RegNo,
- uint64_t Address,
- const void *Decoder) {
- if (RegNo >= 4)
+static DecodeStatus DecodeMSA128BRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128BRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128HRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128HRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128WRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128WRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSA128DRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = getReg(Decoder, Mips::MSA128DRegClassID, RegNo);
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMSACtrlRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 7)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, Mips::LORegsDSPRegClassID, RegNo);
+ unsigned Reg = getReg(Decoder, Mips::MSACtrlRegClassID, RegNo);
Inst.addOperand(MCOperand::CreateReg(Reg));
return MCDisassembler::Success;
}
@@ -540,16 +755,6 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst,
return MCDisassembler::Success;
}
-static DecodeStatus DecodeBC1(MCInst &Inst,
- unsigned Insn,
- uint64_t Address,
- const void *Decoder) {
- unsigned BranchOffset = Insn & 0xffff;
- BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4;
- Inst.addOperand(MCOperand::CreateImm(BranchOffset));
- return MCDisassembler::Success;
-}
-
static DecodeStatus DecodeJumpTarget(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -560,6 +765,24 @@ static DecodeStatus DecodeJumpTarget(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned BranchOffset = Offset & 0xffff;
+ BranchOffset = SignExtend32<18>(BranchOffset << 1);
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeJumpTargetMM(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned JumpOffset = fieldFromInstruction(Insn, 0, 26) << 1;
+ Inst.addOperand(MCOperand::CreateImm(JumpOffset));
+ return MCDisassembler::Success;
+}
static DecodeStatus DecodeSimm16(MCInst &Inst,
unsigned Insn,
@@ -569,6 +792,15 @@ static DecodeStatus DecodeSimm16(MCInst &Inst,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeLSAImm(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // We add one to the immediate field as it was encoded as 'imm - 1'.
+ Inst.addOperand(MCOperand::CreateImm(Insn + 1));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeInsSize(MCInst &Inst,
unsigned Insn,
uint64_t Address,
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
index fc23cd380352..78845898997c 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp
@@ -26,6 +26,12 @@ using namespace llvm;
#define PRINT_ALIAS_INSTR
#include "MipsGenAsmWriter.inc"
+template<unsigned R>
+static bool isReg(const MCInst &MI, unsigned OpNo) {
+ assert(MI.getOperand(OpNo).isReg() && "Register operand expected.");
+ return MI.getOperand(OpNo).getReg() == R;
+}
+
const char* Mips::MipsFCCToString(Mips::CondCode CC) {
switch (CC) {
case FCOND_F:
@@ -80,7 +86,7 @@ void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
}
// Try to print any aliases first.
- if (!printAliasInstr(MI, O))
+ if (!printAliasInstr(MI, O) && !printAlias(*MI, O))
printInstruction(MI, O);
printAnnotation(O, Annot);
@@ -152,11 +158,6 @@ static void printExpr(const MCExpr *Expr, raw_ostream &OS) {
OS << ')';
}
-void MipsInstPrinter::printCPURegs(const MCInst *MI, unsigned OpNo,
- raw_ostream &O) {
- printRegName(O, MI->getOperand(OpNo).getReg());
-}
-
void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
const MCOperand &Op = MI->getOperand(OpNo);
@@ -183,6 +184,15 @@ void MipsInstPrinter::printUnsignedImm(const MCInst *MI, int opNum,
printOperand(MI, opNum, O);
}
+void MipsInstPrinter::printUnsignedImm8(const MCInst *MI, int opNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(opNum);
+ if (MO.isImm())
+ O << (unsigned short int)(unsigned char)MO.getImm();
+ else
+ printOperand(MI, opNum, O);
+}
+
void MipsInstPrinter::
printMemOperand(const MCInst *MI, int opNum, raw_ostream &O) {
// Load/Store memory operands -- imm($reg)
@@ -209,3 +219,70 @@ printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O) {
const MCOperand& MO = MI->getOperand(opNum);
O << MipsFCCToString((Mips::CondCode)MO.getImm());
}
+
+void MipsInstPrinter::
+printSHFMask(const MCInst *MI, int opNum, raw_ostream &O) {
+ llvm_unreachable("TODO");
+}
+
+bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI,
+ unsigned OpNo, raw_ostream &OS) {
+ OS << "\t" << Str << "\t";
+ printOperand(&MI, OpNo, OS);
+ return true;
+}
+
+bool MipsInstPrinter::printAlias(const char *Str, const MCInst &MI,
+ unsigned OpNo0, unsigned OpNo1,
+ raw_ostream &OS) {
+ printAlias(Str, MI, OpNo0, OS);
+ OS << ", ";
+ printOperand(&MI, OpNo1, OS);
+ return true;
+}
+
+bool MipsInstPrinter::printAlias(const MCInst &MI, raw_ostream &OS) {
+ switch (MI.getOpcode()) {
+ case Mips::BEQ:
+ // beq $zero, $zero, $L2 => b $L2
+ // beq $r0, $zero, $L2 => beqz $r0, $L2
+ return (isReg<Mips::ZERO>(MI, 0) && isReg<Mips::ZERO>(MI, 1) &&
+ printAlias("b", MI, 2, OS)) ||
+ (isReg<Mips::ZERO>(MI, 1) && printAlias("beqz", MI, 0, 2, OS));
+ case Mips::BEQ64:
+ // beq $r0, $zero, $L2 => beqz $r0, $L2
+ return isReg<Mips::ZERO_64>(MI, 1) && printAlias("beqz", MI, 0, 2, OS);
+ case Mips::BNE:
+ // bne $r0, $zero, $L2 => bnez $r0, $L2
+ return isReg<Mips::ZERO>(MI, 1) && printAlias("bnez", MI, 0, 2, OS);
+ case Mips::BNE64:
+ // bne $r0, $zero, $L2 => bnez $r0, $L2
+ return isReg<Mips::ZERO_64>(MI, 1) && printAlias("bnez", MI, 0, 2, OS);
+ case Mips::BGEZAL:
+ // bgezal $zero, $L1 => bal $L1
+ return isReg<Mips::ZERO>(MI, 0) && printAlias("bal", MI, 1, OS);
+ case Mips::BC1T:
+ // bc1t $fcc0, $L1 => bc1t $L1
+ return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1t", MI, 1, OS);
+ case Mips::BC1F:
+ // bc1f $fcc0, $L1 => bc1f $L1
+ return isReg<Mips::FCC0>(MI, 0) && printAlias("bc1f", MI, 1, OS);
+ case Mips::JALR:
+ // jalr $ra, $r1 => jalr $r1
+ return isReg<Mips::RA>(MI, 0) && printAlias("jalr", MI, 1, OS);
+ case Mips::JALR64:
+ // jalr $ra, $r1 => jalr $r1
+ return isReg<Mips::RA_64>(MI, 0) && printAlias("jalr", MI, 1, OS);
+ case Mips::NOR:
+ case Mips::NOR_MM:
+ // nor $r0, $r1, $zero => not $r0, $r1
+ return isReg<Mips::ZERO>(MI, 2) && printAlias("not", MI, 0, 1, OS);
+ case Mips::NOR64:
+ // nor $r0, $r1, $zero => not $r0, $r1
+ return isReg<Mips::ZERO_64>(MI, 2) && printAlias("not", MI, 0, 1, OS);
+ case Mips::OR:
+ // or $r0, $r1, $zero => move $r0, $r1
+ return isReg<Mips::ZERO>(MI, 2) && printAlias("move", MI, 0, 1, OS);
+ default: return false;
+ }
+}
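
printAlias above boils down to one pattern: when a designated operand is a fixed register ($zero, $ra, $fcc0), print the shorter alias mnemonic and drop that operand. A toy model of the same idea, using a hypothetical Inst record with textual operands instead of MCInst:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Inst {
      std::string Mnemonic;
      std::vector<std::string> Ops; // register names / labels as text
    };

    // Print "bnez r, L" for "bne r, $zero, L"; otherwise print the
    // instruction unchanged -- the same shape as printAlias.
    static void printWithAlias(const Inst &I) {
      if (I.Mnemonic == "bne" && I.Ops.size() == 3 && I.Ops[1] == "$zero") {
        std::printf("\tbnez\t%s, %s\n", I.Ops[0].c_str(), I.Ops[2].c_str());
        return;
      }
      std::printf("\t%s\t", I.Mnemonic.c_str());
      for (size_t J = 0; J < I.Ops.size(); ++J)
        std::printf("%s%s", J ? ", " : "", I.Ops[J].c_str());
      std::printf("\n");
    }

    int main() {
      printWithAlias({"bne", {"$4", "$zero", "$L2"}}); // -> bnez $4, $L2
      printWithAlias({"addu", {"$2", "$3", "$4"}});    // printed unchanged
      return 0;
    }
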
diff --git a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
index d1b561f9764e..f75ae249c3ee 100644
--- a/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
+++ b/lib/Target/Mips/InstPrinter/MipsInstPrinter.h
@@ -87,16 +87,23 @@ public:
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
- void printCPURegs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
bool printAliasInstr(const MCInst *MI, raw_ostream &OS);
private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O);
+ void printUnsignedImm8(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O);
+ void printSHFMask(const MCInst *MI, int opNum, raw_ostream &O);
+
+ bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo,
+ raw_ostream &OS);
+ bool printAlias(const char *Str, const MCInst &MI, unsigned OpNo0,
+ unsigned OpNo1, raw_ostream &OS);
+ bool printAlias(const MCInst &MI, raw_ostream &OS);
};
} // end namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
index 4212c94a5578..911674890c73 100644
--- a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt
@@ -1,12 +1,11 @@
add_llvm_library(LLVMMipsDesc
MipsAsmBackend.cpp
- MipsDirectObjLower.cpp
MipsMCAsmInfo.cpp
MipsMCCodeEmitter.cpp
MipsMCTargetDesc.cpp
MipsELFObjectWriter.cpp
MipsReginfo.cpp
- MipsELFStreamer.cpp
+ MipsTargetStreamer.cpp
)
add_dependencies(LLVMMipsDesc MipsCommonTableGen)
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index 0b13607a572d..3e70b23dccc6 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -45,6 +45,10 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_Mips_GOT_DISP:
case Mips::fixup_Mips_GOT_LO16:
case Mips::fixup_Mips_CALL_LO16:
+ case Mips::fixup_MICROMIPS_LO16:
+ case Mips::fixup_MICROMIPS_GOT_PAGE:
+ case Mips::fixup_MICROMIPS_GOT_OFST:
+ case Mips::fixup_MICROMIPS_GOT_DISP:
break;
case Mips::fixup_Mips_PC16:
// So far we are only using this type for branches.
@@ -65,6 +69,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
case Mips::fixup_Mips_GOT_Local:
case Mips::fixup_Mips_GOT_HI16:
case Mips::fixup_Mips_CALL_HI16:
+ case Mips::fixup_MICROMIPS_HI16:
// Get the 2nd 16-bits. Also add 1 if bit 15 is 1.
Value = ((Value + 0x8000) >> 16) & 0xffff;
break;
@@ -76,6 +81,13 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
// Get the 4th 16-bits.
Value = ((Value + 0x800080008000LL) >> 48) & 0xffff;
break;
+ case Mips::fixup_MICROMIPS_26_S1:
+ Value >>= 1;
+ break;
+ case Mips::fixup_MICROMIPS_PC16_S1:
+ Value -= 4;
+ Value >>= 1;
+ break;
}
return Value;
@@ -188,7 +200,20 @@ public:
{ "fixup_Mips_GOT_HI16", 0, 16, 0 },
{ "fixup_Mips_GOT_LO16", 0, 16, 0 },
{ "fixup_Mips_CALL_HI16", 0, 16, 0 },
- { "fixup_Mips_CALL_LO16", 0, 16, 0 }
+ { "fixup_Mips_CALL_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_26_S1", 0, 26, 0 },
+ { "fixup_MICROMIPS_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT16", 0, 16, 0 },
+ { "fixup_MICROMIPS_PC16_S1", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_MICROMIPS_CALL16", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_DISP", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_PAGE", 0, 16, 0 },
+ { "fixup_MICROMIPS_GOT_OFST", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_DTPREL_LO16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_HI16", 0, 16, 0 },
+ { "fixup_MICROMIPS_TLS_TPREL_LO16", 0, 16, 0 }
};
if (Kind < FirstTargetFixupKind)
@@ -253,25 +278,33 @@ public:
} // namespace
// MCAsmBackend
-MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/true, /*Is64Bit*/false);
}
-MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/false, /*Is64Bit*/false);
}
-MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/true, /*Is64Bit*/true);
}
-MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT,
+MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT,
StringRef CPU) {
return new MipsAsmBackend(T, Triple(TT).getOS(),
/*IsLittle*/false, /*Is64Bit*/true);
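
The new adjustFixupValue cases above reuse two existing patterns: PC-relative microMIPS branch fixups drop the 4-byte pipeline offset and then shift right by one because targets are half-word aligned, while %hi-style fixups keep the upper 16 bits with rounding so a paired %lo still adds up. A hedged sketch of the two adjustments in isolation; the helper names are illustrative:

    #include <cassert>
    #include <cstdint>

    // microMIPS PC16_S1: the encoded field is (target - PC - 4) >> 1, so the
    // raw relocation value loses the pipeline offset and the low bit.
    static uint64_t adjustMicroMipsPC16(uint64_t Value) {
      Value -= 4;
      return Value >> 1;
    }

    // HI16-style fixups keep the upper 16 bits, adding 1 when bit 15 of the
    // low half is set so that a following %lo() pairs up correctly.
    static uint64_t adjustHi16(uint64_t Value) {
      return ((Value + 0x8000) >> 16) & 0xffff;
    }

    int main() {
      assert(adjustMicroMipsPC16(4 + 2 * 10) == 10); // branch 10 half-words ahead
      assert(adjustHi16(0x12348000) == 0x1235);      // low half >= 0x8000 rounds up
      assert(adjustHi16(0x12347fff) == 0x1234);
      return 0;
    }
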
diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
deleted file mode 100644
index 15c4282030db..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-//===-- MipsDirectObjLower.cpp - Mips LLVM direct object lowering -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower Mips MCInst records that are normally
-// left to the assembler to lower such as large shifts.
-//
-//===----------------------------------------------------------------------===//
-#include "MipsInstrInfo.h"
-#include "MCTargetDesc/MipsDirectObjLower.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-
-using namespace llvm;
-
-// If the D<shift> instruction has a shift amount that is greater
-// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
-void Mips::LowerLargeShift(MCInst& Inst) {
-
- assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!");
- assert(Inst.getOperand(2).isImm());
-
- int64_t Shift = Inst.getOperand(2).getImm();
- if (Shift <= 31)
- return; // Do nothing
- Shift -= 32;
-
- // saminus32
- Inst.getOperand(2).setImm(Shift);
-
- switch (Inst.getOpcode()) {
- default:
- // Calling function is not synchronized
- llvm_unreachable("Unexpected shift instruction");
- case Mips::DSLL:
- Inst.setOpcode(Mips::DSLL32);
- return;
- case Mips::DSRL:
- Inst.setOpcode(Mips::DSRL32);
- return;
- case Mips::DSRA:
- Inst.setOpcode(Mips::DSRA32);
- return;
- }
-}
-
-// Pick a DEXT or DINS instruction variant based on the pos and size operands
-void Mips::LowerDextDins(MCInst& InstIn) {
- int Opcode = InstIn.getOpcode();
-
- if (Opcode == Mips::DEXT)
- assert(InstIn.getNumOperands() == 4 &&
- "Invalid no. of machine operands for DEXT!");
- else // Only DEXT and DINS are possible
- assert(InstIn.getNumOperands() == 5 &&
- "Invalid no. of machine operands for DINS!");
-
- assert(InstIn.getOperand(2).isImm());
- int64_t pos = InstIn.getOperand(2).getImm();
- assert(InstIn.getOperand(3).isImm());
- int64_t size = InstIn.getOperand(3).getImm();
-
- if (size <= 32) {
- if (pos < 32) // DEXT/DINS, do nothing
- return;
- // DEXTU/DINSU
- InstIn.getOperand(2).setImm(pos - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
- return;
- }
- // DEXTM/DINSM
- assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
- InstIn.getOperand(3).setImm(size - 32);
- InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
- return;
-}
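
The deleted MipsDirectObjLower.cpp above documents a MIPS64 encoding detail: the 64-bit shift instructions carry only a 5-bit shift field, so amounts of 32..63 are expressed with the "32" opcode variant and the amount minus 32. A minimal sketch of that selection, using an illustrative enum rather than the real opcode values:

    #include <cassert>

    enum class ShiftOp { DSLL, DSLL32, DSRL, DSRL32, DSRA, DSRA32 };

    // Pick the opcode variant and in-range shift amount for a 64-bit shift.
    // Amounts 0..31 keep the base opcode; 32..63 switch to the "32" variant
    // with the amount reduced by 32, matching the removed LowerLargeShift.
    static ShiftOp lowerLargeShift(ShiftOp Op, unsigned &Amount) {
      assert(Amount < 64 && "shift amount must fit in 6 bits");
      if (Amount <= 31)
        return Op;
      Amount -= 32;
      switch (Op) {
      case ShiftOp::DSLL: return ShiftOp::DSLL32;
      case ShiftOp::DSRL: return ShiftOp::DSRL32;
      case ShiftOp::DSRA: return ShiftOp::DSRA32;
      default:            return Op; // already a "32" variant
      }
    }

    int main() {
      unsigned Amt = 40;
      ShiftOp Op = lowerLargeShift(ShiftOp::DSLL, Amt);
      assert(Op == ShiftOp::DSLL32 && Amt == 8); // dsll 40 -> dsll32 8
      return 0;
    }
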
diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
deleted file mode 100644
index 8813cc9ac7a4..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h
+++ /dev/null
@@ -1,28 +0,0 @@
-//===-- MipsDirectObjLower.h - Mips LLVM direct object lowering *- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MIPSDIRECTOBJLOWER_H
-#define MIPSDIRECTOBJLOWER_H
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class MCInst;
- class MCStreamer;
-
- namespace Mips {
- /// MipsDirectObjLower - This name space is used to lower MCInstr in cases
- // where the assembler usually finishes the lowering
- // such as large shifts.
- void LowerLargeShift(MCInst &Inst);
- void LowerDextDins(MCInst &Inst);
- }
-}
-
-#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 6471b51583ce..83c7d4bcc3c6 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -183,6 +183,45 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
case Mips::fixup_Mips_CALL_LO16:
Type = ELF::R_MIPS_CALL_LO16;
break;
+ case Mips::fixup_MICROMIPS_26_S1:
+ Type = ELF::R_MICROMIPS_26_S1;
+ break;
+ case Mips::fixup_MICROMIPS_HI16:
+ Type = ELF::R_MICROMIPS_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_LO16:
+ Type = ELF::R_MICROMIPS_LO16;
+ break;
+ case Mips::fixup_MICROMIPS_GOT16:
+ Type = ELF::R_MICROMIPS_GOT16;
+ break;
+ case Mips::fixup_MICROMIPS_PC16_S1:
+ Type = ELF::R_MICROMIPS_PC16_S1;
+ break;
+ case Mips::fixup_MICROMIPS_CALL16:
+ Type = ELF::R_MICROMIPS_CALL16;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_DISP:
+ Type = ELF::R_MICROMIPS_GOT_DISP;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_PAGE:
+ Type = ELF::R_MICROMIPS_GOT_PAGE;
+ break;
+ case Mips::fixup_MICROMIPS_GOT_OFST:
+ Type = ELF::R_MICROMIPS_GOT_OFST;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_DTPREL_HI16:
+ Type = ELF::R_MICROMIPS_TLS_DTPREL_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_DTPREL_LO16:
+ Type = ELF::R_MICROMIPS_TLS_DTPREL_LO16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_TPREL_HI16:
+ Type = ELF::R_MICROMIPS_TLS_TPREL_HI16;
+ break;
+ case Mips::fixup_MICROMIPS_TLS_TPREL_LO16:
+ Type = ELF::R_MICROMIPS_TLS_TPREL_LO16;
+ break;
}
return Type;
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
deleted file mode 100644
index c33bc9ae3034..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-//===-- MipsELFStreamer.cpp - MipsELFStreamer ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===-------------------------------------------------------------------===//
-#include "MCTargetDesc/MipsELFStreamer.h"
-#include "MipsSubtarget.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCELF.h"
-#include "llvm/MC/MCELFSymbolFlags.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/ErrorHandling.h"
-
-namespace llvm {
-
- MCELFStreamer* createMipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack) {
- MipsELFStreamer *S = new MipsELFStreamer(Context, TAB, OS, Emitter,
- RelaxAll, NoExecStack);
- return S;
- }
-
- // For llc. Set a group of ELF header flags
- void
- MipsELFStreamer::emitELFHeaderFlagsCG(const MipsSubtarget &Subtarget) {
-
- if (hasRawTextSupport())
- return;
-
- // Update e_header flags
- MCAssembler& MCA = getAssembler();
- unsigned EFlags = MCA.getELFHeaderEFlags();
-
- if (Subtarget.inMips16Mode())
- EFlags |= ELF::EF_MIPS_ARCH_ASE_M16;
- else
- EFlags |= ELF::EF_MIPS_NOREORDER;
-
- // Architecture
- if (Subtarget.hasMips64r2())
- EFlags |= ELF::EF_MIPS_ARCH_64R2;
- else if (Subtarget.hasMips64())
- EFlags |= ELF::EF_MIPS_ARCH_64;
- else if (Subtarget.hasMips32r2())
- EFlags |= ELF::EF_MIPS_ARCH_32R2;
- else
- EFlags |= ELF::EF_MIPS_ARCH_32;
-
- if (Subtarget.inMicroMipsMode())
- EFlags |= ELF::EF_MIPS_MICROMIPS;
-
- // ABI
- if (Subtarget.isABI_O32())
- EFlags |= ELF::EF_MIPS_ABI_O32;
-
- // Relocation Model
- Reloc::Model RM = Subtarget.getRelocationModel();
- if (RM == Reloc::PIC_ || RM == Reloc::Default)
- EFlags |= ELF::EF_MIPS_PIC;
- else if (RM == Reloc::Static)
- ; // Do nothing for Reloc::Static
- else
- llvm_unreachable("Unsupported relocation model for e_flags");
-
- MCA.setELFHeaderEFlags(EFlags);
- }
-
- // For llc. Set a symbol's STO flags
- void
- MipsELFStreamer::emitMipsSTOCG(const MipsSubtarget &Subtarget,
- MCSymbol *Sym,
- unsigned Val) {
-
- if (hasRawTextSupport())
- return;
-
- MCSymbolData &Data = getOrCreateSymbolData(Sym);
- // The "other" values are stored in the last 6 bits of the second byte
- // The traditional defines for STO values assume the full byte and thus
- // the shift to pack it.
- MCELF::setOther(Data, Val >> 2);
- }
-
-} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
deleted file mode 100644
index b10ccc78e665..000000000000
--- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//=== MipsELFStreamer.h - MipsELFStreamer ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENCE.TXT for details.
-//
-//===-------------------------------------------------------------------===//
-#ifndef MIPSELFSTREAMER_H_
-#define MIPSELFSTREAMER_H_
-
-#include "llvm/MC/MCELFStreamer.h"
-
-namespace llvm {
-class MipsAsmPrinter;
-class MipsSubtarget;
-class MCSymbol;
-
-class MipsELFStreamer : public MCELFStreamer {
-public:
- MipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack)
- : MCELFStreamer(SK_MipsELFStreamer, Context, TAB, OS, Emitter) {
- }
-
- ~MipsELFStreamer() {}
- void emitELFHeaderFlagsCG(const MipsSubtarget &Subtarget);
- void emitMipsSTOCG(const MipsSubtarget &Subtarget,
- MCSymbol *Sym,
- unsigned Val);
-
- static bool classof(const MCStreamer *S) {
- return S->getKind() == SK_MipsELFStreamer;
- }
-};
-
- MCELFStreamer* createMipsELFStreamer(MCContext &Context, MCAsmBackend &TAB,
- raw_ostream &OS, MCCodeEmitter *Emitter,
- bool RelaxAll, bool NoExecStack);
-}
-
-#endif /* MIPSELFSTREAMER_H_ */
diff --git a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
index f96390043a3b..6ed44b74cc4b 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h
@@ -128,6 +128,45 @@ namespace Mips {
// resulting in - R_MIPS_CALL_LO16
fixup_Mips_CALL_LO16,
+ // resulting in - R_MICROMIPS_26_S1
+ fixup_MICROMIPS_26_S1,
+
+ // resulting in - R_MICROMIPS_HI16
+ fixup_MICROMIPS_HI16,
+
+ // resulting in - R_MICROMIPS_LO16
+ fixup_MICROMIPS_LO16,
+
+ // resulting in - R_MICROMIPS_GOT16
+ fixup_MICROMIPS_GOT16,
+
+ // resulting in - R_MICROMIPS_PC16_S1
+ fixup_MICROMIPS_PC16_S1,
+
+ // resulting in - R_MICROMIPS_CALL16
+ fixup_MICROMIPS_CALL16,
+
+ // resulting in - R_MICROMIPS_GOT_DISP
+ fixup_MICROMIPS_GOT_DISP,
+
+ // resulting in - R_MICROMIPS_GOT_PAGE
+ fixup_MICROMIPS_GOT_PAGE,
+
+ // resulting in - R_MICROMIPS_GOT_OFST
+ fixup_MICROMIPS_GOT_OFST,
+
+ // resulting in - R_MICROMIPS_TLS_DTPREL_HI16
+ fixup_MICROMIPS_TLS_DTPREL_HI16,
+
+ // resulting in - R_MICROMIPS_TLS_DTPREL_LO16
+ fixup_MICROMIPS_TLS_DTPREL_LO16,
+
+ // resulting in - R_MICROMIPS_TLS_TPREL_HI16
+ fixup_MICROMIPS_TLS_TPREL_HI16,
+
+ // resulting in - R_MICROMIPS_TLS_TPREL_LO16
+ fixup_MICROMIPS_TLS_TPREL_LO16,
+
// Marker
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 5d4b32d30578..6aa3c762d9db 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
void MipsMCAsmInfo::anchor() { }
-MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
+MipsMCAsmInfo::MipsMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
if ((TheTriple.getArch() == Triple::mips) ||
(TheTriple.getArch() == Triple::mips64))
@@ -38,7 +38,6 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
ZeroDirective = "\t.space\t";
GPRel32Directive = "\t.gpword\t";
GPRel64Directive = "\t.gpdword\t";
- WeakRefDirective = "\t.weak\t";
DebugLabelSuffix = "=.";
SupportsDebugInformation = true;
ExceptionsType = ExceptionHandling::DwarfCFI;
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
index e1d878936f31..1000113351b4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
@@ -14,16 +14,15 @@
#ifndef MIPSTARGETASMINFO_H
#define MIPSTARGETASMINFO_H
-#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCAsmInfoELF.h"
namespace llvm {
class StringRef;
- class Target;
- class MipsMCAsmInfo : public MCAsmInfo {
+ class MipsMCAsmInfo : public MCAsmInfoELF {
virtual void anchor();
public:
- explicit MipsMCAsmInfo(const Target &T, StringRef TT);
+ explicit MipsMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 9460731c1914..66428bdfa747 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -13,7 +13,6 @@
//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MCTargetDesc/MipsDirectObjLower.h"
#include "MCTargetDesc/MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/ADT/APFloat.h"
@@ -40,11 +39,14 @@ class MipsMCCodeEmitter : public MCCodeEmitter {
MCContext &Ctx;
const MCSubtargetInfo &STI;
bool IsLittleEndian;
+ bool IsMicroMips;
public:
MipsMCCodeEmitter(const MCInstrInfo &mcii, MCContext &Ctx_,
const MCSubtargetInfo &sti, bool IsLittle) :
- MCII(mcii), Ctx(Ctx_), STI (sti), IsLittleEndian(IsLittle) {}
+ MCII(mcii), Ctx(Ctx_), STI (sti), IsLittleEndian(IsLittle) {
+ IsMicroMips = STI.getFeatureBits() & Mips::FeatureMicroMips;
+ }
~MipsMCCodeEmitter() {}
@@ -54,9 +56,17 @@ public:
void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const {
// Output the instruction encoding in little endian byte order.
- for (unsigned i = 0; i < Size; ++i) {
- unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
- EmitByte((Val >> Shift) & 0xff, OS);
+ // Little-endian byte ordering:
+ // mips32r2: 4 | 3 | 2 | 1
+ // microMIPS: 2 | 1 | 4 | 3
+ if (IsLittleEndian && Size == 4 && IsMicroMips) {
+ EmitInstruction(Val>>16, 2, OS);
+ EmitInstruction(Val, 2, OS);
+ } else {
+ for (unsigned i = 0; i < Size; ++i) {
+ unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8;
+ EmitByte((Val >> Shift) & 0xff, OS);
+ }
}
}
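The byte-ordering comment above is easiest to verify with a small worked example. What follows is a minimal standalone sketch (plain C++, no LLVM types; the value 0x11223344 is arbitrary) of the halfword-swapped emission order used for 32-bit microMIPS encodings on a little-endian target:

    #include <cstdint>
    #include <cstdio>

    // Emit 'Size' bytes of 'Val' little-endian, but swap the two halfwords
    // first when the value is a 32-bit microMIPS encoding, mirroring the
    // EmitInstruction logic above.
    static void emitLE(uint64_t Val, unsigned Size, bool IsMicroMips) {
      if (IsMicroMips && Size == 4) {
        emitLE((Val >> 16) & 0xffff, 2, false); // high halfword first
        emitLE(Val & 0xffff, 2, false);         // then low halfword
        return;
      }
      for (unsigned i = 0; i < Size; ++i)
        std::printf("%02x ", unsigned((Val >> (i * 8)) & 0xff));
    }

    int main() {
      emitLE(0x11223344, 4, false); std::puts(" <- mips32r2:  44 33 22 11");
      emitLE(0x11223344, 4, true);  std::puts(" <- microMIPS: 22 11 44 33");
      return 0;
    }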
@@ -74,12 +84,24 @@ public:
unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+  // getJumpTargetOpValueMM - Return binary encoding of the microMIPS jump
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
// getBranchTargetOpValue - Return binary encoding of the branch
// target operand. If the machine operand requires relocation,
// record the relocation and return zero.
unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+  // getBranchTargetOpValueMM - Return binary encoding of the microMIPS branch
+ // target operand. If the machine operand requires relocation,
+ // record the relocation and return zero.
+ unsigned getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
  // getMachineOpValue - Return binary encoding of operand. If the machine
// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
@@ -87,11 +109,17 @@ public:
unsigned getMemEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const;
+ // getLSAImmEncoding - Return binary encoding of LSA immediate.
+ unsigned getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
unsigned
getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const;
@@ -114,8 +142,74 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
return new MipsMCCodeEmitter(MCII, Ctx, STI, true);
}
+
+// If the D<shift> instruction has a shift amount that is greater
+// than 31 (checked in calling routine), lower it to a D<shift>32 instruction
+static void LowerLargeShift(MCInst& Inst) {
+
+ assert(Inst.getNumOperands() == 3 && "Invalid no. of operands for shift!");
+ assert(Inst.getOperand(2).isImm());
+
+ int64_t Shift = Inst.getOperand(2).getImm();
+ if (Shift <= 31)
+ return; // Do nothing
+ Shift -= 32;
+
+  // Rebase the shift amount (saminus32) for the D<shift>32 form.
+ Inst.getOperand(2).setImm(Shift);
+
+ switch (Inst.getOpcode()) {
+ default:
+ // Calling function is not synchronized
+ llvm_unreachable("Unexpected shift instruction");
+ case Mips::DSLL:
+ Inst.setOpcode(Mips::DSLL32);
+ return;
+ case Mips::DSRL:
+ Inst.setOpcode(Mips::DSRL32);
+ return;
+ case Mips::DSRA:
+ Inst.setOpcode(Mips::DSRA32);
+ return;
+ case Mips::DROTR:
+ Inst.setOpcode(Mips::DROTR32);
+ return;
+ }
+}
+
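As a concrete illustration of the rule implemented above (a standalone sketch with a made-up shift amount, not LLVM code): a 64-bit shift by 36 cannot be encoded in the 5-bit shamt field, so it becomes the "32" form of the opcode with shamt 4.

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Mirror of LowerLargeShift: shift amounts of 32..63 are re-encoded as the
    // D<shift>32 opcode with (amount - 32) in the 5-bit shamt field.
    static std::string lowerLargeShift(const std::string &Opc, int64_t &Amt) {
      assert(Amt >= 0 && Amt < 64 && "shift amount out of range");
      if (Amt <= 31)
        return Opc;      // already encodable
      Amt -= 32;
      return Opc + "32"; // e.g. DSLL -> DSLL32
    }

    int main() {
      int64_t Amt = 36;
      assert(lowerLargeShift("DSLL", Amt) == "DSLL32" && Amt == 4);
      return 0;
    }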
+// Pick a DEXT or DINS instruction variant based on the pos and size operands
+static void LowerDextDins(MCInst& InstIn) {
+ int Opcode = InstIn.getOpcode();
+
+ if (Opcode == Mips::DEXT)
+ assert(InstIn.getNumOperands() == 4 &&
+ "Invalid no. of machine operands for DEXT!");
+ else // Only DEXT and DINS are possible
+ assert(InstIn.getNumOperands() == 5 &&
+ "Invalid no. of machine operands for DINS!");
+
+ assert(InstIn.getOperand(2).isImm());
+ int64_t pos = InstIn.getOperand(2).getImm();
+ assert(InstIn.getOperand(3).isImm());
+ int64_t size = InstIn.getOperand(3).getImm();
+
+ if (size <= 32) {
+ if (pos < 32) // DEXT/DINS, do nothing
+ return;
+ // DEXTU/DINSU
+ InstIn.getOperand(2).setImm(pos - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU);
+ return;
+ }
+ // DEXTM/DINSM
+ assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32");
+ InstIn.getOperand(3).setImm(size - 32);
+ InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM);
+ return;
+}
+
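The variant selection above can be summarised with a standalone sketch (the DEXT spellings are used; the same rule applies to DINS/DINSU/DINSM): DEXT handles pos < 32 and size <= 32, DEXTU rebases pos when pos >= 32, and DEXTM rebases size when size > 32.

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Mirror of LowerDextDins for the DEXT family.
    static std::string pickDextVariant(int64_t &Pos, int64_t &Size) {
      if (Size <= 32) {
        if (Pos < 32)
          return "DEXT";  // both fields already fit
        Pos -= 32;
        return "DEXTU";   // pos is stored rebased by 32
      }
      assert(Pos < 32 && "cannot have both pos and size > 32");
      Size -= 32;
      return "DEXTM";     // size is stored rebased by 32
    }

    int main() {
      int64_t Pos = 40, Size = 8;
      assert(pickDextVariant(Pos, Size) == "DEXTU" && Pos == 8);
      return 0;
    }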
/// EncodeInstruction - Emit the instruction.
-/// Size the instruction (currently only 4 bytes
+/// Size the instruction with Desc.getSize().
void MipsMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const
@@ -131,14 +225,16 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case Mips::DSLL:
case Mips::DSRL:
case Mips::DSRA:
- Mips::LowerLargeShift(TmpInst);
+ case Mips::DROTR:
+ LowerLargeShift(TmpInst);
break;
// Double extract instruction is chosen by pos and size operands
case Mips::DEXT:
case Mips::DINS:
- Mips::LowerDextDins(TmpInst);
+ LowerDextDins(TmpInst);
}
+ unsigned long N = Fixups.size();
uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups);
// Check for unimplemented opcodes.
@@ -151,6 +247,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
if (STI.getFeatureBits() & Mips::FeatureMicroMips) {
int NewOpcode = Mips::Std2MicroMips (Opcode, Mips::Arch_micromips);
if (NewOpcode != -1) {
+ if (Fixups.size() > N)
+ Fixups.pop_back();
Opcode = NewOpcode;
TmpInst.setOpcode (NewOpcode);
Binary = getBinaryCodeForInstr(TmpInst, Fixups);
@@ -188,6 +286,28 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
return 0;
}
+/// getBranchTargetOpValueMM - Return binary encoding of the microMIPS branch
+/// target operand. If the machine operand requires relocation,
+/// record the relocation and return zero.
+unsigned MipsMCCodeEmitter::
+getBranchTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getBranchTargetOpValueMM expects only expressions or immediates");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::
+ fixup_MICROMIPS_PC16_S1)));
+ return 0;
+}
+
/// getJumpTargetOpValue - Return binary encoding of the jump
/// target operand. If the machine operand requires relocation,
/// record the relocation and return zero.
@@ -209,6 +329,23 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo,
}
unsigned MipsMCCodeEmitter::
+getJumpTargetOpValueMM(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpNo);
+ // If the destination is an immediate, divide by 2.
+ if (MO.isImm()) return MO.getImm() >> 1;
+
+ assert(MO.isExpr() &&
+ "getJumpTargetOpValueMM expects only expressions or an immediate");
+
+ const MCExpr *Expr = MO.getExpr();
+ Fixups.push_back(MCFixup::Create(0, Expr,
+ MCFixupKind(Mips::fixup_MICROMIPS_26_S1)));
+ return 0;
+}
+
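Both microMIPS target encoders above divide an immediate destination by two because microMIPS instructions are halfword-aligned (hence the _S1 suffix on the fixups). A standalone sketch with an arbitrary offset:

    #include <cassert>
    #include <cstdint>

    // microMIPS branch/jump targets are encoded in halfword units.
    static unsigned encodeTargetMM(int64_t Imm) {
      return static_cast<unsigned>(Imm >> 1);
    }

    int main() {
      assert(encodeTargetMM(0x40) == 0x20); // byte offset 64 -> field value 32
      return 0;
    }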
+unsigned MipsMCCodeEmitter::
getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
int64_t Res;
@@ -238,31 +375,39 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
FixupKind = Mips::fixup_Mips_GPOFF_LO;
break;
case MCSymbolRefExpr::VK_Mips_GOT_PAGE :
- FixupKind = Mips::fixup_Mips_GOT_PAGE;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_PAGE
+ : Mips::fixup_Mips_GOT_PAGE;
break;
case MCSymbolRefExpr::VK_Mips_GOT_OFST :
- FixupKind = Mips::fixup_Mips_GOT_OFST;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_OFST
+ : Mips::fixup_Mips_GOT_OFST;
break;
case MCSymbolRefExpr::VK_Mips_GOT_DISP :
- FixupKind = Mips::fixup_Mips_GOT_DISP;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT_DISP
+ : Mips::fixup_Mips_GOT_DISP;
break;
case MCSymbolRefExpr::VK_Mips_GPREL:
FixupKind = Mips::fixup_Mips_GPREL16;
break;
case MCSymbolRefExpr::VK_Mips_GOT_CALL:
- FixupKind = Mips::fixup_Mips_CALL16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_CALL16
+ : Mips::fixup_Mips_CALL16;
break;
case MCSymbolRefExpr::VK_Mips_GOT16:
- FixupKind = Mips::fixup_Mips_GOT_Global;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Global;
break;
case MCSymbolRefExpr::VK_Mips_GOT:
- FixupKind = Mips::fixup_Mips_GOT_Local;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_GOT16
+ : Mips::fixup_Mips_GOT_Local;
break;
case MCSymbolRefExpr::VK_Mips_ABS_HI:
- FixupKind = Mips::fixup_Mips_HI16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_HI16
+ : Mips::fixup_Mips_HI16;
break;
case MCSymbolRefExpr::VK_Mips_ABS_LO:
- FixupKind = Mips::fixup_Mips_LO16;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_LO16
+ : Mips::fixup_Mips_LO16;
break;
case MCSymbolRefExpr::VK_Mips_TLSGD:
FixupKind = Mips::fixup_Mips_TLSGD;
@@ -271,19 +416,23 @@ getExprOpValue(const MCExpr *Expr,SmallVectorImpl<MCFixup> &Fixups) const {
FixupKind = Mips::fixup_Mips_TLSLDM;
break;
case MCSymbolRefExpr::VK_Mips_DTPREL_HI:
- FixupKind = Mips::fixup_Mips_DTPREL_HI;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_HI16
+ : Mips::fixup_Mips_DTPREL_HI;
break;
case MCSymbolRefExpr::VK_Mips_DTPREL_LO:
- FixupKind = Mips::fixup_Mips_DTPREL_LO;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_DTPREL_LO16
+ : Mips::fixup_Mips_DTPREL_LO;
break;
case MCSymbolRefExpr::VK_Mips_GOTTPREL:
FixupKind = Mips::fixup_Mips_GOTTPREL;
break;
case MCSymbolRefExpr::VK_Mips_TPREL_HI:
- FixupKind = Mips::fixup_Mips_TPREL_HI;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_HI16
+ : Mips::fixup_Mips_TPREL_HI;
break;
case MCSymbolRefExpr::VK_Mips_TPREL_LO:
- FixupKind = Mips::fixup_Mips_TPREL_LO;
+ FixupKind = IsMicroMips ? Mips::fixup_MICROMIPS_TLS_TPREL_LO16
+ : Mips::fixup_Mips_TPREL_LO;
break;
case MCSymbolRefExpr::VK_Mips_HIGHER:
FixupKind = Mips::fixup_Mips_HIGHER;
@@ -318,7 +467,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
SmallVectorImpl<MCFixup> &Fixups) const {
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- unsigned RegNo = Ctx.getRegisterInfo().getEncodingValue(Reg);
+ unsigned RegNo = Ctx.getRegisterInfo()->getEncodingValue(Reg);
return RegNo;
} else if (MO.isImm()) {
return static_cast<unsigned>(MO.getImm());
@@ -344,6 +493,17 @@ MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo,
return (OffBits & 0xFFFF) | RegBits;
}
+unsigned MipsMCCodeEmitter::
+getMemEncodingMMImm12(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ // Base register is encoded in bits 20-16, offset is encoded in bits 11-0.
+ assert(MI.getOperand(OpNo).isReg());
+ unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) << 16;
+ unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups);
+
+ return (OffBits & 0x0FFF) | RegBits;
+}
+
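The packing done by getMemEncodingMMImm12 is simple enough to check in isolation. A standalone sketch with made-up operand values (base register encoding 29, offset 16):

    #include <cassert>
    #include <cstdint>

    // Base register encoding goes in bits 20-16, the signed 12-bit offset in
    // bits 11-0, as in getMemEncodingMMImm12 above.
    static unsigned packMemMMImm12(unsigned BaseRegEnc, int32_t Offset) {
      return (static_cast<unsigned>(Offset) & 0x0fff) | (BaseRegEnc << 16);
    }

    int main() {
      assert(packMemMMImm12(29, 16) == 0x001d0010u);
      return 0;
    }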
unsigned
MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo,
SmallVectorImpl<MCFixup> &Fixups) const {
@@ -365,5 +525,13 @@ MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo,
return Position + Size - 1;
}
+unsigned
+MipsMCCodeEmitter::getLSAImmEncoding(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ assert(MI.getOperand(OpNo).isImm());
+ // The immediate is encoded as 'immediate - 1'.
+ return getMachineOpValue(MI, MI.getOperand(OpNo), Fixups) - 1;
+}
+
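For reference, the LSA encoding rule above means the legal shift amounts 1..4 are stored as 0..3 in the 2-bit field; a one-line standalone check:

    #include <cassert>

    // LSA's 2-bit field holds (shift amount - 1), per getLSAImmEncoding above.
    static unsigned encodeLSAImm(unsigned Imm) { return Imm - 1; }

    int main() {
      assert(encodeLSAImm(1) == 0 && encodeLSAImm(4) == 3);
      return 0;
    }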
#include "MipsGenMCCodeEmitter.inc"
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index be83b54b6124..5548aaa9a6d8 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -11,17 +11,21 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/MipsELFStreamer.h"
#include "MipsMCTargetDesc.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMCAsmInfo.h"
+#include "MipsTargetStreamer.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#define GET_INSTRINFO_MC_DESC
@@ -93,12 +97,12 @@ static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createMipsMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new MipsMCAsmInfo(T, TT);
+static MCAsmInfo *createMipsMCAsmInfo(const MCRegisterInfo &MRI, StringRef TT) {
+ MCAsmInfo *MAI = new MipsMCAsmInfo(TT);
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(Mips::SP, 0);
- MAI->addInitialFrameState(0, Dst, Src);
+ unsigned SP = MRI.getDwarfRegNum(Mips::SP, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, SP, 0);
+ MAI->addInitialFrameState(Inst);
return MAI;
}
@@ -125,14 +129,23 @@ static MCInstPrinter *createMipsMCInstPrinter(const Target &T,
}
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
- MCContext &Ctx, MCAsmBackend &MAB,
- raw_ostream &_OS,
- MCCodeEmitter *_Emitter,
- bool RelaxAll,
- bool NoExecStack) {
- Triple TheTriple(TT);
-
- return createMipsELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack);
+ MCContext &Context, MCAsmBackend &MAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter,
+ bool RelaxAll, bool NoExecStack) {
+ MipsTargetELFStreamer *S = new MipsTargetELFStreamer();
+ return createELFStreamer(Context, S, MAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+
+static MCStreamer *
+createMCAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
+ bool isVerboseAsm, bool useLoc, bool useCFI,
+ bool useDwarfDirectory, MCInstPrinter *InstPrint,
+ MCCodeEmitter *CE, MCAsmBackend *TAB, bool ShowInst) {
+ MipsTargetAsmStreamer *S = new MipsTargetAsmStreamer(OS);
+
+ return llvm::createAsmStreamer(Ctx, S, OS, isVerboseAsm, useLoc, useCFI,
+ useDwarfDirectory, InstPrint, CE, TAB,
+ ShowInst);
}
extern "C" void LLVMInitializeMipsTargetMC() {
@@ -183,6 +196,12 @@ extern "C" void LLVMInitializeMipsTargetMC() {
TargetRegistry::RegisterMCObjectStreamer(TheMips64elTarget,
createMCStreamer);
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmStreamer(TheMipsTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMipselTarget, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMips64Target, createMCAsmStreamer);
+ TargetRegistry::RegisterAsmStreamer(TheMips64elTarget, createMCAsmStreamer);
+
// Register the asm backend.
TargetRegistry::RegisterMCAsmBackend(TheMipsTarget,
createMipsAsmBackendEB32);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 71954a4bd862..eabebfe1349e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -42,14 +42,14 @@ MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
const MCSubtargetInfo &STI,
MCContext &Ctx);
-MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT,
- StringRef CPU);
-MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT,
- StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB32(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL32(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEB64(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+MCAsmBackend *createMipsAsmBackendEL64(const Target &T, const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS,
uint8_t OSABI,
diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
new file mode 100644
index 000000000000..5e90bbc635a5
--- /dev/null
+++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -0,0 +1,67 @@
+//===-- MipsTargetStreamer.cpp - Mips Target Streamer Methods -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Mips specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsTargetStreamer.h"
+#include "llvm/MC/MCELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+
+static cl::opt<bool> PrintHackDirectives("print-hack-directives",
+ cl::init(false), cl::Hidden);
+
+// pin vtable to this file
+void MipsTargetStreamer::anchor() {}
+
+MipsTargetAsmStreamer::MipsTargetAsmStreamer(formatted_raw_ostream &OS)
+ : OS(OS) {}
+
+void MipsTargetAsmStreamer::emitMipsHackELFFlags(unsigned Flags) {
+ if (!PrintHackDirectives)
+ return;
+
+ OS << "\t.mips_hack_elf_flags 0x";
+ OS.write_hex(Flags);
+ OS << '\n';
+}
+void MipsTargetAsmStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
+ if (!PrintHackDirectives)
+ return;
+
+ OS << "\t.mips_hack_stocg ";
+ OS << Sym->getName();
+ OS << ", ";
+ OS << Val;
+ OS << '\n';
+}
+
+MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
+ return static_cast<MCELFStreamer &>(*Streamer);
+}
+
+void MipsTargetELFStreamer::emitMipsHackELFFlags(unsigned Flags) {
+ MCAssembler &MCA = getStreamer().getAssembler();
+ MCA.setELFHeaderEFlags(Flags);
+}
+
+// Set a symbol's STO flags
+void MipsTargetELFStreamer::emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) {
+ MCSymbolData &Data = getStreamer().getOrCreateSymbolData(Sym);
+ // The "other" values are stored in the last 6 bits of the second byte
+ // The traditional defines for STO values assume the full byte and thus
+ // the shift to pack it.
+ MCELF::setOther(Data, Val >> 2);
+}
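The ">> 2" above exists because MCELF::setOther() stores only the top six bits of ELF's st_other byte, while the STO_MIPS_* constants are defined against the full byte. A standalone sketch (STO_MIPS_MICROMIPS = 0x80 is taken from the MIPS ELF ABI; treat the exact value as an assumption here):

    #include <cassert>
    #include <cstdint>

    // Pack a full-byte STO value into the 6-bit field used by setOther().
    static uint8_t packSTO(unsigned Val) {
      return static_cast<uint8_t>((Val >> 2) & 0x3f);
    }

    int main() {
      const unsigned STO_MIPS_MICROMIPS = 0x80; // assumed value, see note above
      assert(packSTO(STO_MIPS_MICROMIPS) == 0x20);
      return 0;
    }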
diff --git a/lib/Target/Mips/MSA.txt b/lib/Target/Mips/MSA.txt
new file mode 100644
index 000000000000..d1c41932fcb5
--- /dev/null
+++ b/lib/Target/Mips/MSA.txt
@@ -0,0 +1,78 @@
+Code Generation Notes for MSA
+=============================
+
+Intrinsics are lowered to SelectionDAG nodes where possible in order to enable
+optimisation, reduce the size of the ISel matcher, and reduce repetition in
+the implementation. In a small number of cases, this can cause different
+(semantically equivalent) instructions to be used in place of the requested
+instruction, even when no optimisation has taken place.
+
+Instructions
+============
+
+This section describes any quirks of instruction selection for MSA. For
+example, two instructions might be equally valid for some given IR and one is
+chosen in preference to the other.
+
+bclri.b:
+ It is not possible to emit bclri.b since andi.b covers exactly the
+ same cases. andi.b should use fractionally less power than bclri.b in
+ most hardware implementations so it is used in preference to bclri.b.
+
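The equivalence claimed above is purely a bitwise identity and can be checked outside the compiler; a standalone sketch (plain C++, exhaustive over one byte lane):

    #include <cassert>
    #include <cstdint>

    // bclri.b clears bit I of each byte lane; andi.b with the mask ~(1 << I)
    // produces the same result, which is why only andi.b is ever selected.
    static uint8_t bclriLane(uint8_t Lane, unsigned I) { return Lane & ~(1u << I); }
    static uint8_t andiLane(uint8_t Lane, uint8_t Mask) { return Lane & Mask; }

    int main() {
      const unsigned I = 3;
      for (unsigned V = 0; V < 256; ++V)
        assert(bclriLane(uint8_t(V), I) == andiLane(uint8_t(V), uint8_t(~(1u << I))));
      return 0;
    }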
+vshf.w:
+ It is not possible to emit vshf.w when the shuffle description is
+ constant since shf.w covers exactly the same cases. shf.w is used
+ instead. It is also impossible for the shuffle description to be
+ unknown at compile-time due to the definition of shufflevector in
+ LLVM IR.
+
+vshf.[bhwd]:
+ When the shuffle description describes a splat operation, splat.[bhwd]
+ instructions will be selected instead of vshf.[bhwd]. Unlike the ilv*,
+ and pck* instructions, this is matched from MipsISD::VSHF instead of
+ a special-case MipsISD node.
+
+ilvl.d, pckev.d:
+ It is not possible to emit ilvl.d, or pckev.d since ilvev.d covers the
+ same shuffle. ilvev.d will be emitted instead.
+
+ilvr.d, ilvod.d, pckod.d:
+ It is not possible to emit ilvr.d, or pckod.d since ilvod.d covers the
+ same shuffle. ilvod.d will be emitted instead.
+
+splat.[bhwd]:
+ The intrinsic will work as expected. However, unlike other intrinsics
+ it lowers directly to MipsISD::VSHF instead of using common IR.
+
+splati.w:
+ It is not possible to emit splati.w since shf.w covers the same cases.
+ shf.w will be emitted instead.
+
+copy_s.w:
+ On MIPS32, the copy_u.d intrinsic will emit this instruction instead of
+ copy_u.w. This is semantically equivalent since the general-purpose
+ register file is 32-bits wide.
+
+binsri.[bhwd], binsli.[bhwd]:
+ These two operations are equivalent to each other with the operands
+ swapped and condition inverted. The compiler may use either one as
+ appropriate.
+ Furthermore, the compiler may use bsel.[bhwd] for some masks that do
+ not survive the legalization process (this is a bug and will be fixed).
+
+bmnz.v, bmz.v, bsel.v:
+ These three operations differ only in the operand that is tied to the
+ result.
+ It is (currently) not possible to emit bmz.v, or bsel.v since bmnz.v is
+ the same operation and will be emitted instead.
+ In future, the compiler may choose between these three instructions
+ according to register allocation.
+
+bmnzi.b, bmzi.b:
+        Like their non-immediate counterparts, bmnzi.b and bmzi.b are the same
+        operation with the operands swapped. bmnzi.b will (currently) be emitted
+        for both cases.
+
+bseli.v:
+ Unlike the non-immediate versions, bseli.v is distinguishable from
+ bmnzi.b and bmzi.b and can be emitted.
diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td
index 665b4d2d8b41..c12a32e3d803 100644
--- a/lib/Target/Mips/MicroMipsInstrFormats.td
+++ b/lib/Target/Mips/MicroMipsInstrFormats.td
@@ -39,8 +39,8 @@ class SLTI_FM_MM<bits<6> op> : MMArch {
bits<32> Inst;
let Inst{31-26} = op;
- let Inst{25-21} = rs;
- let Inst{20-16} = rt;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
let Inst{15-0} = imm16;
}
@@ -110,3 +110,195 @@ class LW_FM_MM<bits<6> op> : MMArch {
let Inst{20-16} = addr{20-16};
let Inst{15-0} = addr{15-0};
}
+
+class LWL_FM_MM<bits<4> funct> {
+ bits<5> rt;
+ bits<21> addr;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x18;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = addr{20-16};
+ let Inst{15-12} = funct;
+ let Inst{11-0} = addr{11-0};
+}
+
+class CMov_F_I_FM_MM<bits<7> func> : MMArch {
+ bits<5> rd;
+ bits<5> rs;
+ bits<3> fcc;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x15;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-13} = fcc;
+ let Inst{12-6} = func;
+ let Inst{5-0} = 0x3b;
+}
+
+class MTLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x00;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class MFLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = 0x00;
+ let Inst{20-16} = rd;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class CLO_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class SEB_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rd;
+ bits<5> rt;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class EXT_FM_MM<bits<6> funct> : MMArch {
+ bits<5> rt;
+ bits<5> rs;
+ bits<5> pos;
+ bits<5> size;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-11} = size;
+ let Inst{10-6} = pos;
+ let Inst{5-0} = funct;
+}
+
+class J_FM_MM<bits<6> op> : MMArch {
+ bits<26> target;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-0} = target;
+}
+
+class JR_FM_MM<bits<8> funct> : MMArch {
+ bits<5> rs;
+
+ bits<32> Inst;
+
+ let Inst{31-21} = 0x00;
+ let Inst{20-16} = rs;
+ let Inst{15-14} = 0x0;
+ let Inst{13-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class JALR_FM_MM<bits<10> funct> : MMArch {
+ bits<5> rs;
+ bits<5> rd;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rd;
+ let Inst{20-16} = rs;
+ let Inst{15-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class BEQ_FM_MM<bits<6> op> : MMArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = op;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class BGEZ_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class BGEZAL_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> offset;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = offset;
+}
+
+class TEQ_FM_MM<bits<6> funct> : MMArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<4> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-21} = rt;
+ let Inst{20-16} = rs;
+ let Inst{15-12} = code_;
+ let Inst{11-6} = funct;
+ let Inst{5-0} = 0x3c;
+}
+
+class TEQI_FM_MM<bits<5> funct> : MMArch {
+ bits<5> rs;
+ bits<16> imm16;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = funct;
+ let Inst{20-16} = rs;
+ let Inst{15-0} = imm16;
+}
diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td
index 74cdccd3ed57..d9507fa88ebc 100644
--- a/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1,67 +1,219 @@
-let isCodeGenOnly = 1 in {
+def addrimm12 : ComplexPattern<iPTR, 2, "selectIntAddrMM", [frameindex]>;
+
+def simm12 : Operand<i32> {
+ let DecoderMethod = "DecodeSimm12";
+}
+
+def mem_mm_12 : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops GPR32, simm12);
+ let EncoderMethod = "getMemEncodingMMImm12";
+ let ParserMatchClass = MipsMemAsmOperand;
+ let OperandType = "OPERAND_MEMORY";
+}
+
+def jmptarget_mm : Operand<OtherVT> {
+ let EncoderMethod = "getJumpTargetOpValueMM";
+}
+
+def calltarget_mm : Operand<iPTR> {
+ let EncoderMethod = "getJumpTargetOpValueMM";
+}
+
+def brtarget_mm : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValueMM";
+ let OperandType = "OPERAND_PCREL";
+ let DecoderMethod = "DecodeBranchTargetMM";
+}
+
+let canFoldAsLoad = 1 in
+class LoadLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
+ Operand MemOpnd> :
+ InstSE<(outs RO:$rt), (ins MemOpnd:$addr, RO:$src),
+ !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, (OpNode addrimm12:$addr, RO:$src))],
+ NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+ string Constraints = "$src = $rt";
+}
+
+class StoreLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO,
+ Operand MemOpnd>:
+ InstSE<(outs), (ins RO:$rt, MemOpnd:$addr),
+ !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, addrimm12:$addr)], NoItinerary, FrmI> {
+ let DecoderMethod = "DecodeMemMMImm12";
+}
+
+let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
/// Arithmetic Instructions (ALU Immediate)
- def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>,
+ def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd>,
ADDI_FM_MM<0xc>;
- def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>,
+ def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>,
ADDI_FM_MM<0x4>;
- def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
+ def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM_MM<0x24>;
- def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
+ def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM_MM<0x2c>;
- def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>,
+ def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x34>;
- def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
+ def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x14>;
- def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>,
+ def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd>,
ADDI_FM_MM<0x1c>;
- def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM;
+ def LUi_MM : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM_MM;
/// Arithmetic Instructions (3-Operand, R-Type)
- def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>;
- def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>;
- def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>;
- def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>;
- def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>;
- def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>;
- def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>,
+ def ADDu_MM : MMRel, ArithLogicR<"addu", GPR32Opnd>, ADD_FM_MM<0, 0x150>;
+ def SUBu_MM : MMRel, ArithLogicR<"subu", GPR32Opnd>, ADD_FM_MM<0, 0x1d0>;
+ def MUL_MM : MMRel, ArithLogicR<"mul", GPR32Opnd>, ADD_FM_MM<0, 0x210>;
+ def ADD_MM : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM_MM<0, 0x110>;
+ def SUB_MM : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM_MM<0, 0x190>;
+ def SLT_MM : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM_MM<0, 0x350>;
+ def SLTu_MM : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>,
ADD_FM_MM<0, 0x390>;
- def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
+ def AND_MM : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IIAlu, and>,
ADD_FM_MM<0, 0x250>;
- def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
+ def OR_MM : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IIAlu, or>,
ADD_FM_MM<0, 0x290>;
- def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
+ def XOR_MM : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IIAlu, xor>,
ADD_FM_MM<0, 0x310>;
- def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>;
- def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
+ def NOR_MM : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM_MM<0, 0x2d0>;
+ def MULT_MM : MMRel, Mult<"mult", IIImul, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x22c>;
- def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
+ def MULTu_MM : MMRel, Mult<"multu", IIImul, GPR32Opnd, [HI0, LO0]>,
MULT_FM_MM<0x26c>;
+ def SDIV_MM : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM_MM<0x2ac>;
+ def UDIV_MM : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM_MM<0x2ec>;
/// Shift Instructions
- def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>,
+ def SLL_MM : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd>,
SRA_FM_MM<0, 0>;
- def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>,
+ def SRL_MM : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd>,
SRA_FM_MM<0x40, 0>;
- def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>,
+ def SRA_MM : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd>,
SRA_FM_MM<0x80, 0>;
- def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>,
+ def SLLV_MM : MMRel, shift_rotate_reg<"sllv", GPR32Opnd>,
SRLV_FM_MM<0x10, 0>;
- def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>,
+ def SRLV_MM : MMRel, shift_rotate_reg<"srlv", GPR32Opnd>,
SRLV_FM_MM<0x50, 0>;
- def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>,
+ def SRAV_MM : MMRel, shift_rotate_reg<"srav", GPR32Opnd>,
SRLV_FM_MM<0x90, 0>;
- def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>,
+ def ROTR_MM : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd>,
SRA_FM_MM<0xc0, 0>;
- def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>,
+ def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd>,
SRLV_FM_MM<0xd0, 0>;
/// Load and Store Instructions - aligned
- defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>;
- defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>;
- defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>;
- defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>;
- defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>;
- defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>;
- defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>;
- defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>;
+ let DecoderMethod = "DecodeMemMMImm16" in {
+ def LB_MM : Load<"lb", GPR32Opnd>, MMRel, LW_FM_MM<0x7>;
+ def LBu_MM : Load<"lbu", GPR32Opnd>, MMRel, LW_FM_MM<0x5>;
+ def LH_MM : Load<"lh", GPR32Opnd>, MMRel, LW_FM_MM<0xf>;
+ def LHu_MM : Load<"lhu", GPR32Opnd>, MMRel, LW_FM_MM<0xd>;
+ def LW_MM : Load<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>;
+ def SB_MM : Store<"sb", GPR32Opnd>, MMRel, LW_FM_MM<0x6>;
+ def SH_MM : Store<"sh", GPR32Opnd>, MMRel, LW_FM_MM<0xe>;
+ def SW_MM : Store<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>;
+ }
+
+ /// Load and Store Instructions - unaligned
+ def LWL_MM : LoadLeftRightMM<"lwl", MipsLWL, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x0>;
+ def LWR_MM : LoadLeftRightMM<"lwr", MipsLWR, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x1>;
+ def SWL_MM : StoreLeftRightMM<"swl", MipsSWL, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x8>;
+ def SWR_MM : StoreLeftRightMM<"swr", MipsSWR, GPR32Opnd, mem_mm_12>,
+ LWL_FM_MM<0x9>;
+
+ /// Move Conditional
+ def MOVZ_I_MM : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd,
+ NoItinerary>, ADD_FM_MM<0, 0x58>;
+ def MOVN_I_MM : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd,
+ NoItinerary>, ADD_FM_MM<0, 0x18>;
+ def MOVT_I_MM : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIAlu>,
+ CMov_F_I_FM_MM<0x25>;
+ def MOVF_I_MM : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIAlu>,
+ CMov_F_I_FM_MM<0x5>;
+
+ /// Move to/from HI/LO
+ def MTHI_MM : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>,
+ MTLO_FM_MM<0x0b5>;
+ def MTLO_MM : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>,
+ MTLO_FM_MM<0x0f5>;
+ def MFHI_MM : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>,
+ MFLO_FM_MM<0x035>;
+ def MFLO_MM : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>,
+ MFLO_FM_MM<0x075>;
+
+ /// Multiply Add/Sub Instructions
+ def MADD_MM : MMRel, MArithR<"madd", 1>, MULT_FM_MM<0x32c>;
+ def MADDU_MM : MMRel, MArithR<"maddu", 1>, MULT_FM_MM<0x36c>;
+ def MSUB_MM : MMRel, MArithR<"msub">, MULT_FM_MM<0x3ac>;
+ def MSUBU_MM : MMRel, MArithR<"msubu">, MULT_FM_MM<0x3ec>;
+
+ /// Count Leading
+ def CLZ_MM : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM_MM<0x16c>;
+ def CLO_MM : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM_MM<0x12c>;
+
+ /// Sign Ext In Register Instructions.
+ def SEB_MM : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM_MM<0x0ac>;
+ def SEH_MM : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM_MM<0x0ec>;
+
+ /// Word Swap Bytes Within Halfwords
+ def WSBH_MM : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM_MM<0x1ec>;
+
+ def EXT_MM : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>,
+ EXT_FM_MM<0x2c>;
+ def INS_MM : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>,
+ EXT_FM_MM<0x0c>;
+
+ /// Jump Instructions
+ let DecoderMethod = "DecodeJumpTargetMM" in {
+ def J_MM : MMRel, JumpFJ<jmptarget_mm, "j", br, bb, "j">,
+ J_FM_MM<0x35>;
+ def JAL_MM : MMRel, JumpLink<"jal", calltarget_mm>, J_FM_MM<0x3d>;
+ def TAILCALL_MM : MMRel, JumpFJ<calltarget_mm, "j", MipsTailCall, imm,
+ "tcall">, J_FM_MM<0x3d>, IsTailCall;
+ }
+ def JR_MM : MMRel, IndirectBranch<"jr", GPR32Opnd>, JR_FM_MM<0x3c>;
+ def JALR_MM : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM_MM<0x03c>;
+ def TAILCALL_R_MM : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>,
+ JR_FM_MM<0x3c>, IsTailCall;
+ def RET_MM : MMRel, RetBase<"ret", GPR32Opnd>, JR_FM_MM<0x3c>;
+
+ /// Branch Instructions
+ def BEQ_MM : MMRel, CBranch<"beq", brtarget_mm, seteq, GPR32Opnd>,
+ BEQ_FM_MM<0x25>;
+ def BNE_MM : MMRel, CBranch<"bne", brtarget_mm, setne, GPR32Opnd>,
+ BEQ_FM_MM<0x2d>;
+ def BGEZ_MM : MMRel, CBranchZero<"bgez", brtarget_mm, setge, GPR32Opnd>,
+ BGEZ_FM_MM<0x2>;
+ def BGTZ_MM : MMRel, CBranchZero<"bgtz", brtarget_mm, setgt, GPR32Opnd>,
+ BGEZ_FM_MM<0x6>;
+ def BLEZ_MM : MMRel, CBranchZero<"blez", brtarget_mm, setle, GPR32Opnd>,
+ BGEZ_FM_MM<0x4>;
+ def BLTZ_MM : MMRel, CBranchZero<"bltz", brtarget_mm, setlt, GPR32Opnd>,
+ BGEZ_FM_MM<0x0>;
+ def BGEZAL_MM : MMRel, BGEZAL_FT<"bgezal", brtarget_mm, GPR32Opnd>,
+ BGEZAL_FM_MM<0x03>;
+ def BLTZAL_MM : MMRel, BGEZAL_FT<"bltzal", brtarget_mm, GPR32Opnd>,
+ BGEZAL_FM_MM<0x01>;
+
+ /// Trap Instructions
+ def TEQ_MM : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM_MM<0x0>;
+ def TGE_MM : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM_MM<0x08>;
+ def TGEU_MM : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM_MM<0x10>;
+ def TLT_MM : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM_MM<0x20>;
+ def TLTU_MM : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM_MM<0x28>;
+ def TNE_MM : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM_MM<0x30>;
+
+ def TEQI_MM : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM_MM<0x0e>;
+ def TGEI_MM : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM_MM<0x09>;
+ def TGEIU_MM : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM_MM<0x0b>;
+ def TLTI_MM : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM_MM<0x08>;
+ def TLTIU_MM : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM_MM<0x0a>;
+ def TNEI_MM : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM_MM<0x0c>;
}
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index 8c65bb4020b5..e796debd79b6 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -28,7 +28,6 @@ namespace llvm {
FunctionPass *createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
JITCodeEmitter &JCE);
FunctionPass *createMipsConstantIslandPass(MipsTargetMachine &tm);
-
} // end namespace llvm;
#endif
diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td
index eefb02a494ca..b8e3f39256da 100644
--- a/lib/Target/Mips/Mips.td
+++ b/lib/Target/Mips/Mips.td
@@ -78,6 +78,8 @@ def FeatureDSP : SubtargetFeature<"dsp", "HasDSP", "true", "Mips DSP ASE">;
def FeatureDSPR2 : SubtargetFeature<"dspr2", "HasDSPR2", "true",
"Mips DSP-R2 ASE", [FeatureDSP]>;
+def FeatureMSA : SubtargetFeature<"msa", "HasMSA", "true", "Mips MSA ASE">;
+
def FeatureMicroMips : SubtargetFeature<"micromips", "InMicroMipsMode", "true",
"microMips mode">;
@@ -101,6 +103,7 @@ def MipsAsmWriter : AsmWriter {
def MipsAsmParser : AsmParser {
let ShouldEmitMatchRegisterName = 0;
+ let MnemonicContainsDot = 1;
}
def MipsAsmParserVariant : AsmParserVariant {
diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp
index 1bb6fe46295b..6655ff98e033 100644
--- a/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -40,7 +40,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -50,24 +50,23 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP);
- SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP, -8);
- SrcML = MachineLocation(Mips::S1);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S2 = MRI->getDwarfRegNum(Mips::S2, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S2, -8));
- DstML = MachineLocation(MachineLocation::VirtualFP, -12);
- SrcML = MachineLocation(Mips::S0);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S1 = MRI->getDwarfRegNum(Mips::S1, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S1, -12));
- DstML = MachineLocation(MachineLocation::VirtualFP, -4);
- SrcML = MachineLocation(Mips::RA);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ unsigned S0 = MRI->getDwarfRegNum(Mips::S0, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, S0, -16));
+
+ unsigned RA = MRI->getDwarfRegNum(Mips::RA, true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, RA, -4));
if (hasFP(MF))
BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
@@ -172,6 +171,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
MF.getRegInfo().setPhysRegUsed(Mips::RA);
MF.getRegInfo().setPhysRegUsed(Mips::S0);
MF.getRegInfo().setPhysRegUsed(Mips::S1);
+ MF.getRegInfo().setPhysRegUsed(Mips::S2);
}
const MipsFrameLowering *
diff --git a/lib/Target/Mips/Mips16FrameLowering.h b/lib/Target/Mips/Mips16FrameLowering.h
index 54fdb7871466..8ce2ceda7c74 100644
--- a/lib/Target/Mips/Mips16FrameLowering.h
+++ b/lib/Target/Mips/Mips16FrameLowering.h
@@ -20,7 +20,7 @@ namespace llvm {
class Mips16FrameLowering : public MipsFrameLowering {
public:
explicit Mips16FrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, 8) {}
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
new file mode 100644
index 000000000000..81bf18cd09d9
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -0,0 +1,517 @@
+//===---- Mips16HardFloat.cpp - Mips16 Hard Float Pass --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass needed for Mips16 Hard Float
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mips16-hard-float"
+#include "Mips16HardFloat.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <string>
+
+static void inlineAsmOut
+ (LLVMContext &C, StringRef AsmString, BasicBlock *BB ) {
+ std::vector<llvm::Type *> AsmArgTypes;
+ std::vector<llvm::Value*> AsmArgs;
+ llvm::FunctionType *AsmFTy =
+ llvm::FunctionType::get(Type::getVoidTy(C),
+ AsmArgTypes, false);
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(AsmFTy, AsmString, "", true,
+ /* IsAlignStack */ false,
+ llvm::InlineAsm::AD_ATT);
+ CallInst::Create(IA, AsmArgs, "", BB);
+}
+
+namespace {
+
+class InlineAsmHelper {
+ LLVMContext &C;
+ BasicBlock *BB;
+public:
+ InlineAsmHelper(LLVMContext &C_, BasicBlock *BB_) :
+ C(C_), BB(BB_) {
+ }
+
+ void Out(StringRef AsmString) {
+ inlineAsmOut(C, AsmString, BB);
+ }
+
+};
+}
+//
+// Return types that matter for hard float are:
+// float, double, complex float, and complex double
+//
+enum FPReturnVariant {
+ FRet, DRet, CFRet, CDRet, NoFPRet
+};
+
+//
+// Determine which FP return type this function has
+//
+static FPReturnVariant whichFPReturnVariant(Type *T) {
+ switch (T->getTypeID()) {
+ case Type::FloatTyID:
+ return FRet;
+ case Type::DoubleTyID:
+ return DRet;
+ case Type::StructTyID:
+ if (T->getStructNumElements() != 2)
+ break;
+ if ((T->getContainedType(0)->isFloatTy()) &&
+ (T->getContainedType(1)->isFloatTy()))
+ return CFRet;
+ if ((T->getContainedType(0)->isDoubleTy()) &&
+ (T->getContainedType(1)->isDoubleTy()))
+ return CDRet;
+ break;
+ default:
+ break;
+ }
+ return NoFPRet;
+}
+
+//
+// Parameter types that matter are float, (float, float), (float, double),
+// double, (double, double), and (double, float).
+//
+enum FPParamVariant {
+ FSig, FFSig, FDSig,
+ DSig, DDSig, DFSig, NoSig
+};
+
+// which floating point parameter signature variant we are dealing with
+//
+typedef Type::TypeID TypeID;
+const Type::TypeID FloatTyID = Type::FloatTyID;
+const Type::TypeID DoubleTyID = Type::DoubleTyID;
+
+static FPParamVariant whichFPParamVariantNeeded(Function &F) {
+ switch (F.arg_size()) {
+ case 0:
+ return NoSig;
+ case 1:{
+ TypeID ArgTypeID = F.getFunctionType()->getParamType(0)->getTypeID();
+ switch (ArgTypeID) {
+ case FloatTyID:
+ return FSig;
+ case DoubleTyID:
+ return DSig;
+ default:
+ return NoSig;
+ }
+ }
+ default: {
+ TypeID ArgTypeID0 = F.getFunctionType()->getParamType(0)->getTypeID();
+ TypeID ArgTypeID1 = F.getFunctionType()->getParamType(1)->getTypeID();
+ switch(ArgTypeID0) {
+ case FloatTyID: {
+ switch (ArgTypeID1) {
+ case FloatTyID:
+ return FFSig;
+ case DoubleTyID:
+ return FDSig;
+ default:
+ return FSig;
+ }
+ }
+ case DoubleTyID: {
+ switch (ArgTypeID1) {
+ case FloatTyID:
+ return DFSig;
+ case DoubleTyID:
+ return DDSig;
+ default:
+ return DSig;
+ }
+ }
+ default:
+ return NoSig;
+ }
+ }
+ }
+ llvm_unreachable("can't get here");
+}
+
+// Figure out if we need floating point based on the function parameters.
+// We need to move variables in and/or out of floating point
+// registers because of the ABI.
+//
+static bool needsFPStubFromParams(Function &F) {
+ if (F.arg_size() >=1) {
+ Type *ArgType = F.getFunctionType()->getParamType(0);
+ switch (ArgType->getTypeID()) {
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static bool needsFPReturnHelper(Function &F) {
+ Type* RetType = F.getReturnType();
+ return whichFPReturnVariant(RetType) != NoFPRet;
+}
+
+static bool needsFPHelperFromSig(Function &F) {
+ return needsFPStubFromParams(F) || needsFPReturnHelper(F);
+}
+
+//
+// We swap between FP and Integer registers to allow Mips16 and Mips32 to
+// interoperate
+//
+
+static void swapFPIntParams
+ (FPParamVariant PV, Module *M, InlineAsmHelper &IAH,
+ bool LE, bool ToFP) {
+ //LLVMContext &Context = M->getContext();
+ std::string MI = ToFP? "mtc1 ": "mfc1 ";
+ switch (PV) {
+ case FSig:
+ IAH.Out(MI + "$$4,$$f12");
+ break;
+ case FFSig:
+ IAH.Out(MI +"$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f14");
+ break;
+ case FDSig:
+ IAH.Out(MI + "$$4,$$f12");
+ if (LE) {
+ IAH.Out(MI + "$$6,$$f14");
+ IAH.Out(MI + "$$7,$$f15");
+ } else {
+ IAH.Out(MI + "$$7,$$f14");
+ IAH.Out(MI + "$$6,$$f15");
+ }
+ break;
+ case DSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ }
+ break;
+ case DDSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ IAH.Out(MI + "$$6,$$f14");
+ IAH.Out(MI + "$$7,$$f15");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ IAH.Out(MI + "$$7,$$f14");
+ IAH.Out(MI + "$$6,$$f15");
+ }
+ break;
+ case DFSig:
+ if (LE) {
+ IAH.Out(MI + "$$4,$$f12");
+ IAH.Out(MI + "$$5,$$f13");
+ } else {
+ IAH.Out(MI + "$$5,$$f12");
+ IAH.Out(MI + "$$4,$$f13");
+ }
+ IAH.Out(MI + "$$6,$$f14");
+ break;
+ case NoSig:
+ return;
+ }
+}
+//
+// Create the FP call stub for this function. The caller must already have
+// established that one is needed (via needsFPHelperFromSig).
+//
+static void assureFPCallStub(Function &F, Module *M,
+ const MipsSubtarget &Subtarget){
+ // for now we only need them for static relocation
+ if (Subtarget.getRelocationModel() == Reloc::PIC_)
+ return;
+ LLVMContext &Context = M->getContext();
+ bool LE = Subtarget.isLittle();
+ std::string Name = F.getName();
+ std::string SectionName = ".mips16.call.fp." + Name;
+ std::string StubName = "__call_stub_fp_" + Name;
+ //
+ // see if we already have the stub
+ //
+ Function *FStub = M->getFunction(StubName);
+ if (FStub && !FStub->isDeclaration()) return;
+ FStub = Function::Create(F.getFunctionType(),
+ Function::InternalLinkage, StubName, M);
+ FStub->addFnAttr("mips16_fp_stub");
+ FStub->addFnAttr(llvm::Attribute::Naked);
+ FStub->addFnAttr(llvm::Attribute::NoInline);
+ FStub->addFnAttr(llvm::Attribute::NoUnwind);
+ FStub->addFnAttr("nomips16");
+ FStub->setSection(SectionName);
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+ InlineAsmHelper IAH(Context, BB);
+ IAH.Out(".set reorder");
+ FPReturnVariant RV = whichFPReturnVariant(FStub->getReturnType());
+ FPParamVariant PV = whichFPParamVariantNeeded(F);
+ swapFPIntParams(PV, M, IAH, LE, true);
+ if (RV != NoFPRet) {
+ IAH.Out("move $$18, $$31");
+ IAH.Out("jal " + Name);
+ } else {
+ IAH.Out("lui $$25,%hi(" + Name + ")");
+ IAH.Out("addiu $$25,$$25,%lo(" + Name + ")" );
+ }
+ switch (RV) {
+ case FRet:
+ IAH.Out("mfc1 $$2,$$f0");
+ break;
+ case DRet:
+ if (LE) {
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f1");
+ } else {
+ IAH.Out("mfc1 $$3,$$f0");
+ IAH.Out("mfc1 $$2,$$f1");
+ }
+ break;
+ case CFRet:
+ if (LE) {
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f2");
+ } else {
+ IAH.Out("mfc1 $$3,$$f0");
+ IAH.Out("mfc1 $$3,$$f2");
+ }
+ break;
+ case CDRet:
+ if (LE) {
+ IAH.Out("mfc1 $$4,$$f2");
+ IAH.Out("mfc1 $$5,$$f3");
+ IAH.Out("mfc1 $$2,$$f0");
+ IAH.Out("mfc1 $$3,$$f1");
+
+ } else {
+ IAH.Out("mfc1 $$5,$$f2");
+ IAH.Out("mfc1 $$4,$$f3");
+ IAH.Out("mfc1 $$3,$$f0");
+ IAH.Out("mfc1 $$2,$$f1");
+ }
+ break;
+ case NoFPRet:
+ break;
+ }
+ if (RV != NoFPRet)
+ IAH.Out("jr $$18");
+ else
+ IAH.Out("jr $$25");
+ new UnreachableInst(Context, BB);
+}
+
+//
+// Functions that are llvm intrinsics and don't need helpers.
+//
+static const char *IntrinsicInline[] =
+ {"fabs",
+ "fabsf",
+ "llvm.ceil.f32", "llvm.ceil.f64",
+ "llvm.copysign.f32", "llvm.copysign.f64",
+ "llvm.cos.f32", "llvm.cos.f64",
+ "llvm.exp.f32", "llvm.exp.f64",
+ "llvm.exp2.f32", "llvm.exp2.f64",
+ "llvm.fabs.f32", "llvm.fabs.f64",
+ "llvm.floor.f32", "llvm.floor.f64",
+ "llvm.fma.f32", "llvm.fma.f64",
+ "llvm.log.f32", "llvm.log.f64",
+ "llvm.log10.f32", "llvm.log10.f64",
+ "llvm.nearbyint.f32", "llvm.nearbyint.f64",
+ "llvm.pow.f32", "llvm.pow.f64",
+ "llvm.powi.f32", "llvm.powi.f64",
+ "llvm.rint.f32", "llvm.rint.f64",
+ "llvm.round.f32", "llvm.round.f64",
+ "llvm.sin.f32", "llvm.sin.f64",
+ "llvm.sqrt.f32", "llvm.sqrt.f64",
+ "llvm.trunc.f32", "llvm.trunc.f64",
+ };
+
+static bool isIntrinsicInline(Function *F) {
+ return std::binary_search(
+ IntrinsicInline, array_endof(IntrinsicInline),
+ F->getName());
+}
+//
+// Returns of float, double and complex need to be handled with a helper
+// function.
+//
+static bool fixupFPReturnAndCall
+ (Function &F, Module *M, const MipsSubtarget &Subtarget) {
+ bool Modified = false;
+ LLVMContext &C = M->getContext();
+ Type *MyVoid = Type::getVoidTy(C);
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ Instruction &Inst = *I;
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
+ Value *RVal = RI->getReturnValue();
+ if (!RVal) continue;
+ //
+ // If there is a return value and it needs a helper function,
+ // figure out which one and add a call before the actual
+ // return to this helper. The purpose of the helper is to move
+ // floating point values from their soft float return mapping to
+ // where they would have been mapped to in floating point registers.
+ //
+ Type *T = RVal->getType();
+ FPReturnVariant RV = whichFPReturnVariant(T);
+ if (RV == NoFPRet) continue;
+ static const char* Helper[NoFPRet] =
+ {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc",
+ "__mips16_ret_dc"};
+ const char *Name = Helper[RV];
+ AttributeSet A;
+ Value *Params[] = {RVal};
+ Modified = true;
+ //
+ // These helper functions have a different calling ABI so
+ // this __Mips16RetHelper indicates that so that later
+ // during call setup, the proper call lowering to the helper
+ // functions will take place.
+ //
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ "__Mips16RetHelper");
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::ReadNone);
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::NoInline);
+ Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
+ CallInst::Create(F, Params, "", &Inst );
+ } else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+ // pic mode calls are handled by already defined
+ // helper functions
+ if (Subtarget.getRelocationModel() != Reloc::PIC_ ) {
+ Function *F_ = CI->getCalledFunction();
+ if (F_ && !isIntrinsicInline(F_) && needsFPHelperFromSig(*F_)) {
+ assureFPCallStub(*F_, M, Subtarget);
+ Modified=true;
+ }
+ }
+ }
+ }
+ return Modified;
+}
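
To make the return fixup concrete: for a function whose return type maps to DRet, the pass inserts a call to __mips16_ret_df immediately before the ret instruction, passing the return value. The helper-name selection itself is an indexed table lookup, sketched standalone below with the same enum order and names as above (pickRetHelper is a hypothetical stand-in, not an LLVM function).

    #include <cstdio>

    enum FPReturnVariant { FRet, DRet, CFRet, CDRet, NoFPRet };

    static const char *RetHelper[NoFPRet] = {
      "__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc", "__mips16_ret_dc"
    };

    // Helper called just before a return of the given variant, or 0 if none.
    static const char *pickRetHelper(FPReturnVariant RV) {
      return RV == NoFPRet ? 0 : RetHelper[RV];
    }

    int main() {
      std::printf("%s %s\n", pickRetHelper(DRet), pickRetHelper(CDRet));
      // Prints: __mips16_ret_df __mips16_ret_dc
      return 0;
    }
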
+
+static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
+ const MipsSubtarget &Subtarget ) {
+ bool PicMode = Subtarget.getRelocationModel() == Reloc::PIC_;
+ bool LE = Subtarget.isLittle();
+ LLVMContext &Context = M->getContext();
+ std::string Name = F->getName();
+ std::string SectionName = ".mips16.fn." + Name;
+ std::string StubName = "__fn_stub_" + Name;
+ std::string LocalName = "$$__fn_local_" + Name;
+ Function *FStub = Function::Create
+ (F->getFunctionType(),
+ Function::InternalLinkage, StubName, M);
+ FStub->addFnAttr("mips16_fp_stub");
+ FStub->addFnAttr(llvm::Attribute::Naked);
+ FStub->addFnAttr(llvm::Attribute::NoUnwind);
+ FStub->addFnAttr(llvm::Attribute::NoInline);
+ FStub->addFnAttr("nomips16");
+ FStub->setSection(SectionName);
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+ InlineAsmHelper IAH(Context, BB);
+ IAH.Out(" .set macro");
+ if (PicMode) {
+ IAH.Out(".set noreorder");
+ IAH.Out(".cpload $$25");
+ IAH.Out(".set reorder");
+ IAH.Out(".reloc 0,R_MIPS_NONE," + Name);
+ IAH.Out("la $$25," + LocalName);
+ }
+ else {
+ IAH.Out(".set reorder");
+ IAH.Out("la $$25," + Name);
+ }
+ swapFPIntParams(PV, M, IAH, LE, false);
+ IAH.Out("jr $$25");
+ IAH.Out(LocalName + " = " + Name);
+ new UnreachableInst(FStub->getContext(), BB);
+}
+
+//
+// remove the use-soft-float attribute
+//
+static void removeUseSoftFloat(Function &F) {
+ AttributeSet A;
+ DEBUG(errs() << "removing -use-soft-float\n");
+ A = A.addAttribute(F.getContext(), AttributeSet::FunctionIndex,
+ "use-soft-float", "false");
+ F.removeAttributes(AttributeSet::FunctionIndex, A);
+ if (F.hasFnAttribute("use-soft-float")) {
+ DEBUG(errs() << "still has -use-soft-float\n");
+ }
+ F.addAttributes(AttributeSet::FunctionIndex, A);
+}
+
+namespace llvm {
+
+//
+// This pass only makes sense when the underlying chip has floating point but
+// we are compiling as mips16.
+// For all mips16 functions (that are not stubs we have already generated and
+// are not declared nomips16 via attributes), we must:
+// 1) fix up all returns of float, double, single and double complex
+//    by calling a helper function before the actual return.
+// 2) generate helper functions (stubs) that can be called by mips32 functions;
+//    these move parameters that would normally be passed in floating point
+//    registers to their soft float equivalents.
+// 3) in the case of static relocation, generate helper functions so that
+//    mips16 functions can call extern functions of unknown type (mips16 or
+//    mips32).
+// 4) TBD. For pic, calls to extern functions of unknown type are handled by
+//    predefined helper functions in libc. This work is currently done during
+//    call lowering, but it should be moved here in the future.
+//
+bool Mips16HardFloat::runOnModule(Module &M) {
+ DEBUG(errs() << "Run on Module Mips16HardFloat\n");
+ bool Modified = false;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->hasFnAttribute("nomips16") &&
+ F->hasFnAttribute("use-soft-float")) {
+ removeUseSoftFloat(*F);
+ continue;
+ }
+ if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
+ F->hasFnAttribute("nomips16")) continue;
+ Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+ FPParamVariant V = whichFPParamVariantNeeded(*F);
+ if (V != NoSig) {
+ Modified = true;
+ createFPFnStub(F, &M, V, Subtarget);
+ }
+ }
+ return Modified;
+}
+
+char Mips16HardFloat::ID = 0;
+
+}
+
+ModulePass *llvm::createMips16HardFloat(MipsTargetMachine &TM) {
+ return new Mips16HardFloat(TM);
+}
+
diff --git a/lib/Target/Mips/Mips16HardFloat.h b/lib/Target/Mips/Mips16HardFloat.h
new file mode 100644
index 000000000000..b7f712af5b28
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.h
@@ -0,0 +1,54 @@
+//===---- Mips16HardFloat.h for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a phase which implements part of the floating point
+// interoperability between Mips16 and Mips32 code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "MipsTargetMachine.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+
+
+#ifndef MIPS16HARDFLOAT_H
+#define MIPS16HARDFLOAT_H
+
+using namespace llvm;
+
+namespace llvm {
+
+class Mips16HardFloat : public ModulePass {
+
+public:
+ static char ID;
+
+ Mips16HardFloat(MipsTargetMachine &TM_) : ModulePass(ID),
+ TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {
+ }
+
+ virtual const char *getPassName() const {
+ return "MIPS16 Hard Float Pass";
+ }
+
+ virtual bool runOnModule(Module &M);
+
+protected:
+ /// Keep a pointer to the MipsSubtarget around so that we can make the right
+ /// decision when generating code for different targets.
+ const TargetMachine &TM;
+ const MipsSubtarget &Subtarget;
+
+};
+
+ModulePass *createMips16HardFloat(MipsTargetMachine &TM);
+
+}
+#endif
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
index c1c635cb9f4f..4948f40734c8 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.cpp
@@ -42,7 +42,7 @@ bool Mips16DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
}
/// Select multiply instructions.
std::pair<SDNode*, SDNode*>
-Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, DebugLoc DL, EVT Ty,
+Mips16DAGToDAGISel::selectMULT(SDNode *N, unsigned Opc, SDLoc DL, EVT Ty,
bool HasLo, bool HasHi) {
SDNode *Lo = 0, *Hi = 0;
SDNode *Mul = CurDAG->getMachineNode(Opc, DL, MVT::Glue, N->getOperand(0),
@@ -80,10 +80,11 @@ void Mips16DAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
V1 = RegInfo.createVirtualRegister(RC);
V2 = RegInfo.createVirtualRegister(RC);
- BuildMI(MBB, I, DL, TII.get(Mips::LiRxImmX16), V0)
- .addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI);
- BuildMI(MBB, I, DL, TII.get(Mips::AddiuRxPcImmX16), V1)
- .addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+ BuildMI(MBB, I, DL, TII.get(Mips::GotPrologue16), V0).
+ addReg(V1, RegState::Define).
+ addExternalSymbol("_gp_disp", MipsII::MO_ABS_HI).
+ addExternalSymbol("_gp_disp", MipsII::MO_ABS_LO);
+
BuildMI(MBB, I, DL, TII.get(Mips::SllX16), V2).addReg(V0).addImm(16);
BuildMI(MBB, I, DL, TII.get(Mips::AdduRxRyRz16), GlobalBaseReg)
.addReg(V1).addReg(V2);
@@ -118,11 +119,13 @@ void Mips16DAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
SDValue Mips16DAGToDAGISel::getMips16SPAliasReg() {
unsigned Mips16SPAliasReg =
MF->getInfo<MipsFunctionInfo>()->getMips16SPAliasReg();
- return CurDAG->getRegister(Mips16SPAliasReg, TLI.getPointerTy());
+ return CurDAG->getRegister(Mips16SPAliasReg,
+ getTargetLowering()->getPointerTy());
}
void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
- SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, TLI.getPointerTy());
+ SDValue AliasFPReg = CurDAG->getRegister(Mips::S0,
+ getTargetLowering()->getPointerTy());
if (Parent) {
switch (Parent->getOpcode()) {
case ISD::LOAD: {
@@ -149,7 +152,7 @@ void Mips16DAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) {
}
}
}
- AliasReg = CurDAG->getRegister(Mips::SP, TLI.getPointerTy());
+ AliasReg = CurDAG->getRegister(Mips::SP, getTargetLowering()->getPointerTy());
return;
}
@@ -235,7 +238,7 @@ bool Mips16DAGToDAGISel::selectAddr16(
/// expanded, promoted and normal instructions
std::pair<bool, SDNode*> Mips16DAGToDAGISel::selectNode(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
///
// Instruction Selection not handled by the auto-generated
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h
index f05f9b766df8..49dc6e587b62 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.h
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h
@@ -23,7 +23,7 @@ public:
explicit Mips16DAGToDAGISel(MipsTargetMachine &TM) : MipsDAGToDAGISel(TM) {}
private:
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc DL,
+ std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc DL,
EVT Ty, bool HasLo, bool HasHi);
SDValue getMips16SPAliasReg();
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index f63318f1e6de..61d8bb8e5582 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -13,19 +13,14 @@
#define DEBUG_TYPE "mips-lower"
#include "Mips16ISelLowering.h"
#include "MipsRegisterInfo.h"
+#include "MipsTargetMachine.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include <set>
using namespace llvm;
-static cl::opt<bool>
-Mips16HardFloat("mips16-hard-float", cl::NotHidden,
- cl::desc("MIPS: mips16 hard float enable."),
- cl::init(false));
-
static cl::opt<bool> DontExpandCondPseudos16(
"mips16-dont-expand-cond-pseudo",
cl::init(false),
@@ -34,15 +29,98 @@ static cl::opt<bool> DontExpandCondPseudos16(
cl::Hidden);
namespace {
- std::set<const char*, MipsTargetLowering::LTStr> NoHelperNeeded;
+struct Mips16Libcall {
+ RTLIB::Libcall Libcall;
+ const char *Name;
+
+ bool operator<(const Mips16Libcall &RHS) const {
+ return std::strcmp(Name, RHS.Name) < 0;
+ }
+};
+
+struct Mips16IntrinsicHelperType{
+ const char* Name;
+ const char* Helper;
+
+ bool operator<(const Mips16IntrinsicHelperType &RHS) const {
+ return std::strcmp(Name, RHS.Name) < 0;
+ }
+ bool operator==(const Mips16IntrinsicHelperType &RHS) const {
+ return std::strcmp(Name, RHS.Name) == 0;
+ }
+};
}
+// Libcalls for which no helper is generated. Sorted by name for binary search.
+static const Mips16Libcall HardFloatLibCalls[] = {
+ { RTLIB::ADD_F64, "__mips16_adddf3" },
+ { RTLIB::ADD_F32, "__mips16_addsf3" },
+ { RTLIB::DIV_F64, "__mips16_divdf3" },
+ { RTLIB::DIV_F32, "__mips16_divsf3" },
+ { RTLIB::OEQ_F64, "__mips16_eqdf2" },
+ { RTLIB::OEQ_F32, "__mips16_eqsf2" },
+ { RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2" },
+ { RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi" },
+ { RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi" },
+ { RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf" },
+ { RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf" },
+ { RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf" },
+ { RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf" },
+ { RTLIB::OGE_F64, "__mips16_gedf2" },
+ { RTLIB::OGE_F32, "__mips16_gesf2" },
+ { RTLIB::OGT_F64, "__mips16_gtdf2" },
+ { RTLIB::OGT_F32, "__mips16_gtsf2" },
+ { RTLIB::OLE_F64, "__mips16_ledf2" },
+ { RTLIB::OLE_F32, "__mips16_lesf2" },
+ { RTLIB::OLT_F64, "__mips16_ltdf2" },
+ { RTLIB::OLT_F32, "__mips16_ltsf2" },
+ { RTLIB::MUL_F64, "__mips16_muldf3" },
+ { RTLIB::MUL_F32, "__mips16_mulsf3" },
+ { RTLIB::UNE_F64, "__mips16_nedf2" },
+ { RTLIB::UNE_F32, "__mips16_nesf2" },
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_dc" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_df" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sc" }, // No associated libcall.
+ { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sf" }, // No associated libcall.
+ { RTLIB::SUB_F64, "__mips16_subdf3" },
+ { RTLIB::SUB_F32, "__mips16_subsf3" },
+ { RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2" },
+ { RTLIB::UO_F64, "__mips16_unorddf2" },
+ { RTLIB::UO_F32, "__mips16_unordsf2" }
+};
+
+static const Mips16IntrinsicHelperType Mips16IntrinsicHelper[] = {
+ {"__fixunsdfsi", "__mips16_call_stub_2" },
+ {"ceil", "__mips16_call_stub_df_2"},
+ {"ceilf", "__mips16_call_stub_sf_1"},
+ {"copysign", "__mips16_call_stub_df_10"},
+ {"copysignf", "__mips16_call_stub_sf_5"},
+ {"cos", "__mips16_call_stub_df_2"},
+ {"cosf", "__mips16_call_stub_sf_1"},
+ {"exp2", "__mips16_call_stub_df_2"},
+ {"exp2f", "__mips16_call_stub_sf_1"},
+ {"floor", "__mips16_call_stub_df_2"},
+ {"floorf", "__mips16_call_stub_sf_1"},
+ {"log2", "__mips16_call_stub_df_2"},
+ {"log2f", "__mips16_call_stub_sf_1"},
+ {"nearbyint", "__mips16_call_stub_df_2"},
+ {"nearbyintf", "__mips16_call_stub_sf_1"},
+ {"rint", "__mips16_call_stub_df_2"},
+ {"rintf", "__mips16_call_stub_sf_1"},
+ {"sin", "__mips16_call_stub_df_2"},
+ {"sinf", "__mips16_call_stub_sf_1"},
+ {"sqrt", "__mips16_call_stub_df_2"},
+ {"sqrtf", "__mips16_call_stub_sf_1"},
+ {"trunc", "__mips16_call_stub_df_2"},
+ {"truncf", "__mips16_call_stub_sf_1"},
+};
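
Both tables above follow the same pattern: entries sorted by name, with operator< defined via strcmp so that std::binary_search can be used at call-lowering time (and, for the intrinsic table, std::find to retrieve the matching helper). A standalone sketch of that pattern with a few sample rows, assuming nothing beyond the standard library:

    #include <algorithm>
    #include <cstring>
    #include <iostream>
    #include <iterator>

    struct NameHelper {
      const char *Name;
      const char *Helper;
      bool operator<(const NameHelper &RHS) const {
        return std::strcmp(Name, RHS.Name) < 0;
      }
      bool operator==(const NameHelper &RHS) const {
        return std::strcmp(Name, RHS.Name) == 0;
      }
    };

    // Must stay sorted by Name under strcmp ordering.
    static const NameHelper Table[] = {
      { "ceilf", "__mips16_call_stub_sf_1" },
      { "cos",   "__mips16_call_stub_df_2" },
      { "sqrt",  "__mips16_call_stub_df_2" },
    };

    static const char *lookupHelper(const char *Sym) {
      NameHelper Key = { Sym, "" };
      if (!std::binary_search(std::begin(Table), std::end(Table), Key))
        return 0;
      return std::find(std::begin(Table), std::end(Table), Key)->Helper;
    }

    int main() {
      std::cout << lookupHelper("cos") << "\n";                     // __mips16_call_stub_df_2
      std::cout << (lookupHelper("cosf") ? "hit" : "miss") << "\n"; // miss
      return 0;
    }
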
+
Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
: MipsTargetLowering(TM) {
//
// set up as if mips32 and then revert so we can test the mechanism
// for switching
- addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
+ addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
computeRegisterProperties();
clearRegisterClasses();
@@ -50,7 +128,7 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
// Set up the register classes
addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
- if (Mips16HardFloat)
+ if (Subtarget->inMips16HardFloat())
setMips16HardFloatLibCalls();
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
@@ -67,6 +145,11 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
computeRegisterProperties();
}
@@ -91,57 +174,57 @@ Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case Mips::SelBneZ:
return emitSel16(Mips::BnezRxImm16, MI, BB);
case Mips::SelTBteqZCmpi:
- return emitSeliT16(Mips::BteqzX16, Mips::CmpiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::CmpiRxImmX16, MI, BB);
case Mips::SelTBteqZSlti:
- return emitSeliT16(Mips::BteqzX16, Mips::SltiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::SltiRxImmX16, MI, BB);
case Mips::SelTBteqZSltiu:
- return emitSeliT16(Mips::BteqzX16, Mips::SltiuRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Bteqz16, Mips::SltiuRxImmX16, MI, BB);
case Mips::SelTBtneZCmpi:
- return emitSeliT16(Mips::BtnezX16, Mips::CmpiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::CmpiRxImmX16, MI, BB);
case Mips::SelTBtneZSlti:
- return emitSeliT16(Mips::BtnezX16, Mips::SltiRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::SltiRxImmX16, MI, BB);
case Mips::SelTBtneZSltiu:
- return emitSeliT16(Mips::BtnezX16, Mips::SltiuRxImmX16, MI, BB);
+ return emitSeliT16(Mips::Btnez16, Mips::SltiuRxImmX16, MI, BB);
case Mips::SelTBteqZCmp:
- return emitSelT16(Mips::BteqzX16, Mips::CmpRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
case Mips::SelTBteqZSlt:
- return emitSelT16(Mips::BteqzX16, Mips::SltRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
case Mips::SelTBteqZSltu:
- return emitSelT16(Mips::BteqzX16, Mips::SltuRxRy16, MI, BB);
+ return emitSelT16(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
case Mips::SelTBtneZCmp:
- return emitSelT16(Mips::BtnezX16, Mips::CmpRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
case Mips::SelTBtneZSlt:
- return emitSelT16(Mips::BtnezX16, Mips::SltRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
case Mips::SelTBtneZSltu:
- return emitSelT16(Mips::BtnezX16, Mips::SltuRxRy16, MI, BB);
+ return emitSelT16(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
case Mips::BteqzT8CmpX16:
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::CmpRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
case Mips::BteqzT8SltX16:
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::SltRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
case Mips::BteqzT8SltuX16:
// TBD: figure out a way to get this or remove the instruction
// altogether.
- return emitFEXT_T8I816_ins(Mips::BteqzX16, Mips::SltuRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
case Mips::BtnezT8CmpX16:
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::CmpRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
case Mips::BtnezT8SltX16:
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::SltRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
case Mips::BtnezT8SltuX16:
// TBD: figure out a way to get this or remove the instruction
// altogether.
- return emitFEXT_T8I816_ins(Mips::BtnezX16, Mips::SltuRxRy16, MI, BB);
+ return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
case Mips::BteqzT8CmpiX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
case Mips::BteqzT8SltiX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::SltiRxImm16, Mips::SltiRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
case Mips::BteqzT8SltiuX16: return emitFEXT_T8I8I16_ins(
- Mips::BteqzX16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, MI, BB);
+ Mips::Bteqz16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
case Mips::BtnezT8CmpiX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
case Mips::BtnezT8SltiX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::SltiRxImm16, Mips::SltiRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
case Mips::BtnezT8SltiuX16: return emitFEXT_T8I8I16_ins(
- Mips::BtnezX16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, MI, BB);
+ Mips::Btnez16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
break;
case Mips::SltCCRxRy16:
return emitFEXT_CCRX16_ins(Mips::SltRxRy16, MI, BB);
@@ -166,47 +249,17 @@ isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
return false;
}
-void Mips16TargetLowering::setMips16LibcallName
- (RTLIB::Libcall L, const char *Name) {
- setLibcallName(L, Name);
- NoHelperNeeded.insert(Name);
-}
-
void Mips16TargetLowering::setMips16HardFloatLibCalls() {
- setMips16LibcallName(RTLIB::ADD_F32, "__mips16_addsf3");
- setMips16LibcallName(RTLIB::ADD_F64, "__mips16_adddf3");
- setMips16LibcallName(RTLIB::SUB_F32, "__mips16_subsf3");
- setMips16LibcallName(RTLIB::SUB_F64, "__mips16_subdf3");
- setMips16LibcallName(RTLIB::MUL_F32, "__mips16_mulsf3");
- setMips16LibcallName(RTLIB::MUL_F64, "__mips16_muldf3");
- setMips16LibcallName(RTLIB::DIV_F32, "__mips16_divsf3");
- setMips16LibcallName(RTLIB::DIV_F64, "__mips16_divdf3");
- setMips16LibcallName(RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2");
- setMips16LibcallName(RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2");
- setMips16LibcallName(RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi");
- setMips16LibcallName(RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi");
- setMips16LibcallName(RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf");
- setMips16LibcallName(RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf");
- setMips16LibcallName(RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf");
- setMips16LibcallName(RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf");
- setMips16LibcallName(RTLIB::OEQ_F32, "__mips16_eqsf2");
- setMips16LibcallName(RTLIB::OEQ_F64, "__mips16_eqdf2");
- setMips16LibcallName(RTLIB::UNE_F32, "__mips16_nesf2");
- setMips16LibcallName(RTLIB::UNE_F64, "__mips16_nedf2");
- setMips16LibcallName(RTLIB::OGE_F32, "__mips16_gesf2");
- setMips16LibcallName(RTLIB::OGE_F64, "__mips16_gedf2");
- setMips16LibcallName(RTLIB::OLT_F32, "__mips16_ltsf2");
- setMips16LibcallName(RTLIB::OLT_F64, "__mips16_ltdf2");
- setMips16LibcallName(RTLIB::OLE_F32, "__mips16_lesf2");
- setMips16LibcallName(RTLIB::OLE_F64, "__mips16_ledf2");
- setMips16LibcallName(RTLIB::OGT_F32, "__mips16_gtsf2");
- setMips16LibcallName(RTLIB::OGT_F64, "__mips16_gtdf2");
- setMips16LibcallName(RTLIB::UO_F32, "__mips16_unordsf2");
- setMips16LibcallName(RTLIB::UO_F64, "__mips16_unorddf2");
- setMips16LibcallName(RTLIB::O_F32, "__mips16_unordsf2");
- setMips16LibcallName(RTLIB::O_F64, "__mips16_unorddf2");
-}
+ for (unsigned I = 0; I != array_lengthof(HardFloatLibCalls); ++I) {
+ assert((I == 0 || HardFloatLibCalls[I - 1] < HardFloatLibCalls[I]) &&
+ "Array not sorted!");
+ if (HardFloatLibCalls[I].Libcall != RTLIB::UNKNOWN_LIBCALL)
+ setLibcallName(HardFloatLibCalls[I].Libcall, HardFloatLibCalls[I].Name);
+ }
+ setLibcallName(RTLIB::O_F64, "__mips16_unorddf2");
+ setLibcallName(RTLIB::O_F32, "__mips16_unordsf2");
+}
//
// The Mips16 hard float is a crazy quilt inherited from gcc. I have a much
@@ -371,10 +424,13 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
SelectionDAG &DAG = CLI.DAG;
+ MachineFunction &MF = DAG.getMachineFunction();
+ MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
const char* Mips16HelperFunction = 0;
bool NeedMips16Helper = false;
- if (getTargetMachine().Options.UseSoftFloat && Mips16HardFloat) {
+ if (getTargetMachine().Options.UseSoftFloat &&
+ Subtarget->inMips16HardFloat()) {
//
// currently we don't have symbols tagged with the mips16 or mips32
// qualifier so we will assume that we don't know what kind it is.
@@ -382,9 +438,34 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
//
bool LookupHelper = true;
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
- if (NoHelperNeeded.find(S->getSymbol()) != NoHelperNeeded.end()) {
+ Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() };
+
+ if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
+ Find))
LookupHelper = false;
+ else {
+ Mips16IntrinsicHelperType IntrinsicFind = {S->getSymbol(), ""};
+        // One more look at the list of intrinsics.
+ if (std::binary_search(Mips16IntrinsicHelper,
+ array_endof(Mips16IntrinsicHelper),
+ IntrinsicFind)) {
+ const Mips16IntrinsicHelperType *h =(std::find(Mips16IntrinsicHelper,
+ array_endof(Mips16IntrinsicHelper),
+ IntrinsicFind));
+ Mips16HelperFunction = h->Helper;
+ NeedMips16Helper = true;
+ LookupHelper = false;
+ }
+
}
+ } else if (GlobalAddressSDNode *G =
+ dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
+ Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL,
+ G->getGlobal()->getName().data() };
+
+ if (std::binary_search(HardFloatLibCalls, array_endof(HardFloatLibCalls),
+ Find))
+ LookupHelper = false;
}
if (LookupHelper) Mips16HelperFunction =
getMips16HelperFunction(CLI.RetTy, CLI.Args, NeedMips16Helper);
@@ -400,7 +481,10 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
if (NeedMips16Helper) {
RegsToPass.push_front(std::make_pair(V0Reg, Callee));
JumpTarget = DAG.getExternalSymbol(Mips16HelperFunction, getPointerTy());
- JumpTarget = getAddrGlobal(JumpTarget, DAG, MipsII::MO_GOT);
+ ExternalSymbolSDNode *S = cast<ExternalSymbolSDNode>(JumpTarget);
+ JumpTarget = getAddrGlobal(S, JumpTarget.getValueType(), DAG,
+ MipsII::MO_GOT, Chain,
+ FuncInfo->callPtrInfo(S->getSymbol()));
} else
RegsToPass.push_front(std::make_pair((unsigned)Mips::T9, Callee));
}
@@ -621,7 +705,7 @@ MachineBasicBlock
}
MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc,
+ unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
MachineInstr *MI, MachineBasicBlock *BB) const {
if (DontExpandCondPseudos16)
return BB;
@@ -632,7 +716,8 @@ MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
unsigned CmpOpc;
if (isUInt<8>(imm))
CmpOpc = CmpiOpc;
- else if (isUInt<16>(imm))
+ else if ((!ImmSigned && isUInt<16>(imm)) ||
+ (ImmSigned && isInt<16>(imm)))
CmpOpc = CmpiXOpc;
else
llvm_unreachable("immediate field not usable");
diff --git a/lib/Target/Mips/Mips16ISelLowering.h b/lib/Target/Mips/Mips16ISelLowering.h
index b23e2a1f37db..33b953f6ff39 100644
--- a/lib/Target/Mips/Mips16ISelLowering.h
+++ b/lib/Target/Mips/Mips16ISelLowering.h
@@ -32,8 +32,6 @@ namespace llvm {
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const;
- void setMips16LibcallName(RTLIB::Libcall, const char *Name);
-
void setMips16HardFloatLibCalls();
unsigned int
@@ -64,7 +62,7 @@ namespace llvm {
MachineBasicBlock *BB) const;
MachineBasicBlock *emitFEXT_T8I8I16_ins(
- unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc,
+ unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
MachineInstr *MI, MachineBasicBlock *BB) const;
MachineBasicBlock *emitFEXT_CCRX16_ins(
diff --git a/lib/Target/Mips/Mips16InstrFormats.td b/lib/Target/Mips/Mips16InstrFormats.td
index 4ff62ef3b6f9..da3a1f114af3 100644
--- a/lib/Target/Mips/Mips16InstrFormats.td
+++ b/lib/Target/Mips/Mips16InstrFormats.td
@@ -61,7 +61,7 @@ class MipsInst16<dag outs, dag ins, string asmstr, list<dag> pattern,
// Top 5 bits are the 'opcode' field
let Inst{15-11} = Opcode;
-
+
let Size=2;
field bits<16> SoftFail = 0;
}
@@ -74,7 +74,7 @@ class MipsInst16_32<dag outs, dag ins, string asmstr, list<dag> pattern,
MipsInst16_Base<outs, ins, asmstr, pattern, itin>
{
field bits<32> Inst;
-
+
let Size=4;
field bits<32> SoftFail = 0;
}
@@ -148,6 +148,20 @@ class FRR16<bits<5> _funct, dag outs, dag ins, string asmstr,
let Inst{4-0} = funct;
}
+class FRRBreak16<dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin>:
+ MipsInst16<outs, ins, asmstr, pattern, itin>
+{
+ bits<6> Code;
+ bits<5> funct;
+
+ let Opcode = 0b11101;
+ let funct = 0b00101;
+
+ let Inst{10-5} = Code;
+ let Inst{4-0} = funct;
+}
+
//
// For conversion functions.
//
diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp
index 17dd2c07967a..000ea2897f43 100644
--- a/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -10,7 +10,6 @@
// This file contains the Mips16 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
-
#include "Mips16InstrInfo.h"
#include "InstPrinter/MipsInstPrinter.h"
#include "MipsMachineFunction.h"
@@ -20,10 +19,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
+#include <cctype>
using namespace llvm;
@@ -36,8 +37,8 @@ static cl::opt<bool> NeverUseSaveRestore(
Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm)
- : MipsInstrInfo(tm, Mips::BimmX16),
- RI(*tm.getSubtargetImpl(), *this) {}
+ : MipsInstrInfo(tm, Mips::Bimm16),
+ RI(*tm.getSubtargetImpl()) {}
const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
return RI;
@@ -72,16 +73,16 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
unsigned Opc = 0;
if (Mips::CPU16RegsRegClass.contains(DestReg) &&
- Mips::CPURegsRegClass.contains(SrcReg))
+ Mips::GPR32RegClass.contains(SrcReg))
Opc = Mips::MoveR3216;
- else if (Mips::CPURegsRegClass.contains(DestReg) &&
+ else if (Mips::GPR32RegClass.contains(DestReg) &&
Mips::CPU16RegsRegClass.contains(SrcReg))
Opc = Mips::Move32R16;
- else if ((SrcReg == Mips::HI) &&
+ else if ((SrcReg == Mips::HI0) &&
(Mips::CPU16RegsRegClass.contains(DestReg)))
Opc = Mips::Mfhi16, SrcReg = 0;
- else if ((SrcReg == Mips::LO) &&
+ else if ((SrcReg == Mips::LO0) &&
(Mips::CPU16RegsRegClass.contains(DestReg)))
Opc = Mips::Mflo16, SrcReg = 0;
@@ -109,8 +110,9 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (Mips::CPU16RegsRegClass.hasSubClassEq(RC))
Opc = Mips::SwRxSpImmX16;
assert(Opc && "Register class not handled!");
- BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
+ BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)).
+ addFrameIndex(FI).addImm(Offset)
+ .addMemOperand(MMO);
}
void Mips16InstrInfo::
@@ -145,18 +147,22 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
/// GetOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
-unsigned Mips16InstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+unsigned Mips16InstrInfo::getOppositeBranchOpc(unsigned Opc) const {
switch (Opc) {
default: llvm_unreachable("Illegal opcode!");
case Mips::BeqzRxImmX16: return Mips::BnezRxImmX16;
case Mips::BnezRxImmX16: return Mips::BeqzRxImmX16;
+ case Mips::BeqzRxImm16: return Mips::BnezRxImm16;
+ case Mips::BnezRxImm16: return Mips::BeqzRxImm16;
case Mips::BteqzT8CmpX16: return Mips::BtnezT8CmpX16;
case Mips::BteqzT8SltX16: return Mips::BtnezT8SltX16;
case Mips::BteqzT8SltiX16: return Mips::BtnezT8SltiX16;
+ case Mips::Btnez16: return Mips::Bteqz16;
case Mips::BtnezX16: return Mips::BteqzX16;
case Mips::BtnezT8CmpiX16: return Mips::BteqzT8CmpiX16;
case Mips::BtnezT8SltuX16: return Mips::BteqzT8SltuX16;
case Mips::BtnezT8SltiuX16: return Mips::BteqzT8SltiuX16;
+ case Mips::Bteqz16: return Mips::Btnez16;
case Mips::BteqzX16: return Mips::BtnezX16;
case Mips::BteqzT8CmpiX16: return Mips::BtnezT8CmpiX16;
case Mips::BteqzT8SltuX16: return Mips::BtnezT8SltuX16;
@@ -323,48 +329,88 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
//
RegScavenger rs;
int32_t lo = Imm & 0xFFFF;
- int32_t hi = ((Imm >> 16) + (lo >> 15)) & 0xFFFF;
NewImm = lo;
- unsigned Reg =0;
- unsigned SpReg = 0;
+ int Reg =0;
+ int SpReg = 0;
+
rs.enterBasicBlock(&MBB);
rs.forward(II);
//
+ // We need to know which registers can be used, in the case where there
+ // are not enough free registers. We exclude all registers that
+ // are used in the instruction that we are helping.
+  // Consider all allocatable registers in the register class initially.
+ BitVector Candidates =
+ RI.getAllocatableSet
+ (*II->getParent()->getParent(), &Mips::CPU16RegsRegClass);
+ // Exclude all the registers being used by the instruction.
+ for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = II->getOperand(i);
+ if (MO.isReg() && MO.getReg() != 0 && !MO.isDef() &&
+ !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ Candidates.reset(MO.getReg());
+ }
+ //
+ // If the same register was used and defined in an instruction, then
+ // it will not be in the list of candidates.
+ //
+  // We need to analyze the instruction that we are helping.
+  // We need to know if it defines register x but register x is not
+  // present as an operand of the instruction. This tells us
+  // whether the register is live before the instruction. If it is not,
+  // then we don't need to save it in case there are no free registers.
+ //
+ int DefReg = 0;
+ for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = II->getOperand(i);
+ if (MO.isReg() && MO.isDef()) {
+ DefReg = MO.getReg();
+ break;
+ }
+ }
+ //
+ BitVector Available = rs.getRegsAvailable(&Mips::CPU16RegsRegClass);
+
+ Available &= Candidates;
+ //
// we use T0 for the first register, if we need to save something away.
// we use T1 for the second register, if we need to save something away.
//
unsigned FirstRegSaved =0, SecondRegSaved=0;
unsigned FirstRegSavedTo = 0, SecondRegSavedTo = 0;
- Reg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass);
- if (Reg == 0) {
- FirstRegSaved = Reg = Mips::V0;
- FirstRegSavedTo = Mips::T0;
- copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true);
+
+ Reg = Available.find_first();
+
+ if (Reg == -1) {
+ Reg = Candidates.find_first();
+ Candidates.reset(Reg);
+ if (DefReg != Reg) {
+ FirstRegSaved = Reg;
+ FirstRegSavedTo = Mips::T0;
+ copyPhysReg(MBB, II, DL, FirstRegSavedTo, FirstRegSaved, true);
+ }
}
else
- rs.setUsed(Reg);
- BuildMI(MBB, II, DL, get(Mips::LiRxImmX16), Reg).addImm(hi);
- BuildMI(MBB, II, DL, get(Mips::SllX16), Reg).addReg(Reg).
- addImm(16);
+ Available.reset(Reg);
+ BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
+ NewImm = 0;
if (FrameReg == Mips::SP) {
- SpReg = rs.FindUnusedReg(&Mips::CPU16RegsRegClass);
- if (SpReg == 0) {
- if (Reg != Mips::V1) {
- SecondRegSaved = SpReg = Mips::V1;
+ SpReg = Available.find_first();
+ if (SpReg == -1) {
+ SpReg = Candidates.find_first();
+ // Candidates.reset(SpReg); // not really needed
+ if (DefReg!= SpReg) {
+ SecondRegSaved = SpReg;
SecondRegSavedTo = Mips::T1;
}
- else {
- SecondRegSaved = SpReg = Mips::V0;
- SecondRegSavedTo = Mips::T0;
- }
- copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true);
+ if (SecondRegSaved)
+ copyPhysReg(MBB, II, DL, SecondRegSavedTo, SecondRegSaved, true);
}
- else
- rs.setUsed(SpReg);
-
+ else
+ Available.reset(SpReg);
copyPhysReg(MBB, II, DL, SpReg, Mips::SP, false);
- BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg)
+ BuildMI(MBB, II, DL, get(Mips:: AdduRxRyRz16), Reg).addReg(SpReg, RegState::Kill)
.addReg(Reg);
}
else
@@ -380,8 +426,27 @@ Mips16InstrInfo::loadImmediate(unsigned FrameReg,
return Reg;
}
-unsigned Mips16InstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+/// This function generates the sequence of instructions needed to load the
+/// 32-bit constant IMM into a register.
+unsigned
+Mips16InstrInfo::basicLoadImmediate(
+ unsigned FrameReg,
+ int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned &NewImm) const {
+ const TargetRegisterClass *RC = &Mips::CPU16RegsRegClass;
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+ BuildMI(MBB, II, DL, get(Mips::LwConstant32), Reg).addImm(Imm);
+ NewImm = 0;
+ return Reg;
+}
+
+unsigned Mips16InstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 ||
+ Opc == Mips::Bimm16 ||
+ Opc == Mips::Bteqz16 || Opc == Mips::Btnez16 ||
+ Opc == Mips::BeqzRxImm16 || Opc == Mips::BnezRxImm16 ||
Opc == Mips::BnezRxImmX16 || Opc == Mips::BteqzX16 ||
Opc == Mips::BteqzT8CmpX16 || Opc == Mips::BteqzT8CmpiX16 ||
Opc == Mips::BteqzT8SltX16 || Opc == Mips::BteqzT8SltuX16 ||
@@ -415,3 +480,69 @@ void Mips16InstrInfo::BuildAddiuSpImm
const MipsInstrInfo *llvm::createMips16InstrInfo(MipsTargetMachine &TM) {
return new Mips16InstrInfo(TM);
}
+
+bool Mips16InstrInfo::validImmediate(unsigned Opcode, unsigned Reg,
+ int64_t Amount) {
+ switch (Opcode) {
+ case Mips::LbRxRyOffMemX16:
+ case Mips::LbuRxRyOffMemX16:
+ case Mips::LhRxRyOffMemX16:
+ case Mips::LhuRxRyOffMemX16:
+ case Mips::SbRxRyOffMemX16:
+ case Mips::ShRxRyOffMemX16:
+ case Mips::LwRxRyOffMemX16:
+ case Mips::SwRxRyOffMemX16:
+ case Mips::SwRxSpImmX16:
+ case Mips::LwRxSpImmX16:
+ return isInt<16>(Amount);
+ case Mips::AddiuRxRyOffMemX16:
+ if ((Reg == Mips::PC) || (Reg == Mips::SP))
+ return isInt<16>(Amount);
+ return isInt<15>(Amount);
+ }
+ llvm_unreachable("unexpected Opcode in validImmediate");
+}
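
validImmediate above leans on LLVM's isInt<N> template to decide whether a frame offset still fits the instruction's immediate field. A rough standalone equivalent (isIntN here is a hand-rolled stand-in, not the LLVM utility) and the two ranges involved:

    #include <cstdint>
    #include <iostream>

    // Signed N-bit range check: -(2^(N-1)) <= X < 2^(N-1).
    template <unsigned N> static bool isIntN(int64_t X) {
      return X >= -(int64_t(1) << (N - 1)) && X < (int64_t(1) << (N - 1));
    }

    int main() {
      // 16-bit offsets: -32768..32767 (loads/stores, and addiu off PC or SP).
      // 15-bit offsets: -16384..16383 (addiu off a plain register).
      std::cout << isIntN<16>(32767) << isIntN<16>(32768)
                << isIntN<15>(16383) << isIntN<15>(16384) << "\n"; // 1010
      return 0;
    }
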
+
+/// Measure the specified inline asm to determine an approximation of its
+/// length.
+/// Comments (which run till the next SeparatorString or newline) do not
+/// count as an instruction.
+/// Any other non-whitespace text is considered an instruction, with
+/// multiple instructions separated by SeparatorString or newlines.
+/// Variable-length instructions are not handled here; this function
+/// may be overloaded in the target code to do that.
+/// We implement the special case of the .space directive taking only an
+/// integer argument, which is the size in bytes. This is used for creating
+/// inline code spacing for testing purposes using inline assembly.
+///
+unsigned Mips16InstrInfo::getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const {
+
+
+ // Count the number of instructions in the asm.
+ bool atInsnStart = true;
+ unsigned Length = 0;
+ for (; *Str; ++Str) {
+ if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
+ strlen(MAI.getSeparatorString())) == 0)
+ atInsnStart = true;
+ if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
+ if (strncmp(Str, ".space", 6)==0) {
+ char *EStr; int Sz;
+ Sz = strtol(Str+6, &EStr, 10);
+ while (isspace(*EStr)) ++EStr;
+ if (*EStr=='\0') {
+ DEBUG(dbgs() << "parsed .space " << Sz << '\n');
+ return Sz;
+ }
+ }
+ Length += MAI.getMaxInstLength();
+ atInsnStart = false;
+ }
+ if (atInsnStart && strncmp(Str, MAI.getCommentString(),
+ strlen(MAI.getCommentString())) == 0)
+ atInsnStart = false;
+ }
+
+ return Length;
+}
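
The .space special case above is easiest to see with a concrete input: inline asm consisting only of "\t.space 64" reports 64 bytes, while ordinary statements are each charged the target's maximum instruction length. A simplified standalone sketch of that counting loop (separator strings and comment handling omitted; MaxInstLength fixed at 4 purely for illustration):

    #include <cctype>
    #include <cstdlib>
    #include <cstring>
    #include <iostream>

    static unsigned roughAsmLength(const char *Str, unsigned MaxInstLength = 4) {
      bool AtInsnStart = true;
      unsigned Length = 0;
      for (; *Str; ++Str) {
        if (*Str == '\n')
          AtInsnStart = true;
        if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
          // Fast path: a lone ".space <bytes>" directive gives the size directly.
          if (std::strncmp(Str, ".space", 6) == 0) {
            char *End;
            long Sz = std::strtol(Str + 6, &End, 10);
            while (std::isspace(static_cast<unsigned char>(*End)))
              ++End;
            if (*End == '\0')
              return static_cast<unsigned>(Sz);
          }
          Length += MaxInstLength;
          AtInsnStart = false;
        }
      }
      return Length;
    }

    int main() {
      std::cout << roughAsmLength("\t.space 64") << "\n"; // 64
      std::cout << roughAsmLength("nop\n\tnop\n") << "\n"; // 8
      return 0;
    }
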
diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h
index a77a9043bb17..d9a594b537a2 100644
--- a/lib/Target/Mips/Mips16InstrInfo.h
+++ b/lib/Target/Mips/Mips16InstrInfo.h
@@ -64,11 +64,11 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
// Adjust SP by FrameSize bytes. Save RA, S0, S1
void makeFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock::iterator I) const;
// Adjust SP by FrameSize bytes. Restore RA, S0, S1
void restoreFrame(unsigned SP, int64_t FrameSize, MachineBasicBlock &MBB,
@@ -88,6 +88,13 @@ public:
MachineBasicBlock::iterator II, DebugLoc DL,
unsigned &NewImm) const;
+ unsigned basicLoadImmediate(unsigned FrameReg,
+ int64_t Imm, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator II, DebugLoc DL,
+ unsigned &NewImm) const;
+
+ static bool validImmediate(unsigned Opcode, unsigned Reg, int64_t Amount);
+
static bool validSpImm8(int offset) {
return ((offset & 7) == 0) && isInt<11>(offset);
}
@@ -101,8 +108,10 @@ public:
void BuildAddiuSpImm
(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, int64_t Imm) const;
+ unsigned getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
void ExpandRetRA16(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc) const;
diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td
index aa51aaf46565..7441c78a0330 100644
--- a/lib/Target/Mips/Mips16InstrInfo.td
+++ b/lib/Target/Mips/Mips16InstrInfo.td
@@ -21,17 +21,27 @@ def addr16 :
// Address operand
def mem16 : Operand<i32> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU16Regs, simm16, CPU16Regs);
+ let MIOperandInfo = (ops CPU16Regs, simm16, CPU16RegsPlusSP);
let EncoderMethod = "getMemEncoding";
}
def mem16_ea : Operand<i32> {
let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPU16Regs, simm16);
+ let MIOperandInfo = (ops CPU16RegsPlusSP, simm16);
let EncoderMethod = "getMemEncoding";
}
//
+// I-type instruction format
+//
+// This is only used by bimm. The actual assembly value is a 12-bit signed
+// number.
+//
+class FI16_ins<bits<5> op, string asmstr, InstrItinClass itin>:
+ FI16<op, (outs), (ins brtarget:$imm16),
+ !strconcat(asmstr, "\t$imm16 # 16 bit inst"), [], itin>;
+
+//
//
// I8 instruction format
//
@@ -41,7 +51,10 @@ class FI816_ins_base<bits<3> _func, string asmstr,
FI816<_func, (outs), (ins simm16:$imm), !strconcat(asmstr, asmstr2),
[], itin>;
-
+class FI816_ins<bits<3> _func, string asmstr,
+ InstrItinClass itin>:
+ FI816_ins_base<_func, asmstr, "\t$imm # 16 bit inst", itin>;
+
class FI816_SP_ins<bits<3> _func, string asmstr,
InstrItinClass itin>:
FI816_ins_base<_func, asmstr, "\t$$sp, $imm # 16 bit inst", itin>;
@@ -60,6 +73,11 @@ class FRI16_ins<bits<5> op, string asmstr,
InstrItinClass itin>:
FRI16_ins_base<op, asmstr, "\t$rx, $imm \t# 16 bit inst", itin>;
+class FRI16_TCP_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FRI16<_op, (outs CPU16Regs:$rx), (ins pcrel16:$imm, i32imm:$size),
+ !strconcat(asmstr, "\t$rx, $imm\t# 16 bit inst"), [], itin>;
+
class FRI16R_ins_base<bits<5> op, string asmstr, string asmstr2,
InstrItinClass itin>:
FRI16<op, (outs), (ins CPU16Regs:$rx, simm16:$imm),
@@ -172,6 +190,11 @@ class FEXT_RI16_B_ins<bits<5> _op, string asmstr,
FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, brtarget:$imm),
!strconcat(asmstr, "\t$rx, $imm"), [], itin>;
+class FEXT_RI16_TCP_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins pcrel16:$imm, i32imm:$size),
+ !strconcat(asmstr, "\t$rx, $imm"), [], itin>;
+
class FEXT_2RI16_ins<bits<5> _op, string asmstr,
InstrItinClass itin>:
FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPU16Regs:$rx_, simm16:$imm),
@@ -187,6 +210,11 @@ class FEXT_RI16_SP_explicit_ins<bits<5> _op, string asmstr,
FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm),
!strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
+class FEXT_RI16_SP_Store_explicit_ins<bits<5> _op, string asmstr,
+ InstrItinClass itin>:
+ FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, CPUSPReg:$ry, simm16:$imm),
+ !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>;
+
//
// EXT-RRI instruction format
//
@@ -215,7 +243,7 @@ class FEXT_RRI_A16_mem_ins<bits<1> op, string asmstr, Operand MemOpnd,
// EXT-SHIFT instruction format
//
class FEXT_SHIFT16_ins<bits<2> _f, string asmstr, InstrItinClass itin>:
- FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa),
+ FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, uimm5:$sa),
!strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>;
//
@@ -248,7 +276,7 @@ class FEXT_T8I8I16_ins<string asmstr, string asmstr2>:
// I8_MOVR32 instruction format (used only by the MOVR32 instruction)
//
class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>:
- FI8_MOVR3216<(outs CPU16Regs:$rz), (ins CPURegs:$r32),
+ FI8_MOVR3216<(outs CPU16Regs:$rz), (ins GPR32:$r32),
!strconcat(asmstr, "\t$rz, $r32"), [], itin>;
//
@@ -256,7 +284,7 @@ class FI8_MOVR3216_ins<string asmstr, InstrItinClass itin>:
//
class FI8_MOV32R16_ins<string asmstr, InstrItinClass itin>:
- FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz),
+ FI8_MOV32R16<(outs GPR32:$r32), (ins CPU16Regs:$rz),
!strconcat(asmstr, "\t$r32, $rz"), [], itin>;
//
@@ -287,6 +315,11 @@ class FRR16_ins<bits<5> f, string asmstr, InstrItinClass itin> :
!strconcat(asmstr, "\t$rx, $ry"), [], itin> {
}
+class FRRBreakNull16_ins<string asmstr, InstrItinClass itin> :
+ FRRBreak16<(outs), (ins), asmstr, [], itin> {
+ let Code=0;
+}
+
class FRR16R_ins<bits<5> f, string asmstr, InstrItinClass itin> :
FRR16<f, (outs), (ins CPU16Regs:$rx, CPU16Regs:$ry),
!strconcat(asmstr, "\t$rx, $ry"), [], itin> {
@@ -333,6 +366,14 @@ class FRR16_JALRC_ins<bits<1> nd, bits<1> l, bits<1> ra,
FRR16_JALRC<nd, l, ra, (outs), (ins CPU16Regs:$rx),
!strconcat(asmstr, "\t $rx"), [], itin> ;
+class FRR_SF16_ins
+ <bits<5> _funct, bits<3> _subfunc,
+ string asmstr, InstrItinClass itin>:
+ FRR_SF16<_funct, _subfunc, (outs CPU16Regs:$rx), (ins CPU16Regs:$rx_),
+ !strconcat(asmstr, "\t $rx"),
+ [], itin> {
+ let Constraints = "$rx_ = $rx";
+ }
//
// RRR-type instruction format
//
@@ -437,7 +478,7 @@ def Constant32:
MipsPseudo16<(outs), (ins imm32:$imm), "\t.word $imm", []>;
def LwConstant32:
- MipsPseudo16<(outs), (ins CPU16Regs:$rx, imm32:$imm),
+ MipsPseudo16<(outs CPU16Regs:$rx), (ins imm32:$imm, imm32:$constid),
"lw\t$rx, 1f\n\tb\t2f\n\t.align\t2\n1: \t.word\t$imm\n2:", []>;
@@ -549,6 +590,14 @@ def BeqzRxImm16: FRI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16;
//
def BeqzRxImmX16: FEXT_RI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16;
+//
+// Format: B offset MIPS16e
+// Purpose: Unconditional Branch
+// To do an unconditional PC-relative branch.
+//
+
+def Bimm16: FI16_ins<0b00010, "b", IIAlu>, branch16;
+
// Format: B offset MIPS16e
// Purpose: Unconditional Branch
// To do an unconditional PC-relative branch.
@@ -569,11 +618,22 @@ def BnezRxImm16: FRI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16;
//
def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16;
+
+//
+// Format: BREAK immediate
+// Purpose: Breakpoint
+// To cause a Breakpoint exception.
+
+def Break16: FRRBreakNull16_ins<"break 0", NoItinerary>;
//
// Format: BTEQZ offset MIPS16e
// Purpose: Branch on T Equal to Zero (Extended)
// To test special register T then do a PC-relative conditional branch.
//
+def Bteqz16: FI816_ins<0b000, "bteqz", IIAlu>, cbranch16 {
+ let Uses = [T8];
+}
+
def BteqzX16: FEXT_I816_ins<0b000, "bteqz", IIAlu>, cbranch16 {
let Uses = [T8];
}
@@ -597,6 +657,11 @@ def BteqzT8SltiuX16: FEXT_T8I8I16_ins<"bteqz", "sltiu">,
// Purpose: Branch on T Not Equal to Zero (Extended)
// To test special register T then do a PC-relative conditional branch.
//
+
+def Btnez16: FI816_ins<0b001, "btnez", IIAlu>, cbranch16 {
+ let Uses = [T8];
+}
+
def BtnezX16: FEXT_I816_ins<0b001, "btnez", IIAlu> ,cbranch16 {
let Uses = [T8];
}
@@ -648,7 +713,7 @@ def CmpiRxImmX16: FEXT_RI16R_ins<0b01110, "cmpi", IIAlu> {
// To divide 32-bit signed integers.
//
def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> {
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -657,7 +722,7 @@ def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> {
// To divide 32-bit unsigned integers.
//
def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
// Format: JAL target MIPS16e
@@ -667,10 +732,8 @@ def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> {
//
def Jal16 : FJAL16_ins<0b0, "jal", IIAlu> {
- let isBranch = 1;
let hasDelaySlot = 0; // not true, but we add the nop for now
- let isTerminator=1;
- let isBarrier=1;
+ let isCall=1;
}
//
@@ -753,6 +816,10 @@ def LiRxImm16: FRI16_ins<0b01101, "li", IIAlu>;
//
def LiRxImmX16: FEXT_RI16_ins<0b01101, "li", IIAlu>;
+def LiRxImmAlignX16: FEXT_RI16_ins<0b01101, ".align 2\n\tli", IIAlu> {
+ let isCodeGenOnly = 1;
+}
+
//
// Format: LW ry, offset(rx) MIPS16e
// Purpose: Load Word (Extended)
@@ -766,10 +833,13 @@ def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad{
// Purpose: Load Word (SP-Relative, Extended)
// To load an SP-relative word from memory as a signed value.
//
-def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10110, "lw", IILoad>, MayLoad{
+def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10010, "lw", IILoad>, MayLoad{
let Uses = [SP];
}
+def LwRxPcTcp16: FRI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
+
+def LwRxPcTcpX16: FEXT_RI16_TCP_ins<0b10110, "lw", IILoad>, MayLoad;
//
// Format: MOVE r32, rz MIPS16e
// Purpose: Move
@@ -790,7 +860,7 @@ def MoveR3216: FI8_MOVR3216_ins<"move", IIAlu>;
// To copy the special purpose HI register to a GPR.
//
def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
- let Uses = [HI];
+ let Uses = [HI0];
let neverHasSideEffects = 1;
}
@@ -800,7 +870,7 @@ def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> {
// To copy the special purpose LO register to a GPR.
//
def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
- let Uses = [LO];
+ let Uses = [LO0];
let neverHasSideEffects = 1;
}
@@ -810,13 +880,13 @@ def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> {
def MultRxRy16: FMULT16_ins<"mult", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -827,7 +897,7 @@ def MultuRxRy16: FMULT16_ins<"multu", IIAlu> {
def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -838,7 +908,7 @@ def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> {
def MultuRxRyRz16: FMULT16_LO_ins<"multu", IIAlu> {
let isCommutable = 1;
let neverHasSideEffects = 1;
- let Defs = [HI, LO];
+ let Defs = [HI0, LO0];
}
//
@@ -878,9 +948,9 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>;
let ra=1, s=0,s0=1,s1=1 in
def RestoreRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "restore\t$$ra, $$s0, $$s1, $frame_size", [], IILoad >, MayLoad {
+ "restore\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IILoad >, MayLoad {
let isCodeGenOnly = 1;
- let Defs = [S0, S1, RA, SP];
+ let Defs = [S0, S1, S2, RA, SP];
let Uses = [SP];
}
@@ -906,9 +976,9 @@ def RestoreIncSpF16:
let ra=1, s=1,s0=1,s1=1 in
def SaveRaF16:
FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size),
- "save\t$$ra, $$s0, $$s1, $frame_size", [], IIStore >, MayStore {
+ "save\t$$ra, $$s0, $$s1, $$s2, $frame_size", [], IIStore >, MayStore {
let isCodeGenOnly = 1;
- let Uses = [RA, SP, S0, S1];
+ let Uses = [RA, SP, S0, S1, S2];
let Defs = [SP];
}
@@ -933,6 +1003,22 @@ def SbRxRyOffMemX16:
FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIStore>, MayStore;
//
+// Format: SEB rx MIPS16e
+// Purpose: Sign-Extend Byte
+// Sign-extend least significant byte in register rx.
+//
+def SebRx16
+ : FRR_SF16_ins<0b10001, 0b100, "seb", IIAlu>;
+
+//
+// Format: SEH rx MIPS16e
+// Purpose: Sign-Extend Halfword
+// Sign-extend least significant halfword in register rx.
+//
+def SehRx16
+ : FRR_SF16_ins<0b10001, 0b101, "seh", IIAlu>;
+
+//
// The Sel(T) instructions are pseudos
// T means that they use T8 implicitly.
//
@@ -1057,7 +1143,7 @@ def ShRxRyOffMemX16:
//
// Format: SLL rx, ry, sa MIPS16e
// Purpose: Shift Word Left Logical (Extended)
-// To execute a left-shift of a word by a fixed number of bits—0 to 31 bits.
+// To execute a left-shift of a word by a fixed number of bits-0 to 31 bits.
//
def SllX16: FEXT_SHIFT16_ins<0b00, "sll", IIAlu>;
@@ -1153,7 +1239,7 @@ def SravRxRy16: FRxRxRy16_ins<0b00111, "srav", IIAlu>;
// Format: SRA rx, ry, sa MIPS16e
// Purpose: Shift Word Right Arithmetic (Extended)
// To execute an arithmetic right-shift of a word by a fixed
-// number of bits—1 to 8 bits.
+// number of bits-1 to 8 bits.
//
def SraX16: FEXT_SHIFT16_ins<0b11, "sra", IIAlu>;
@@ -1171,7 +1257,7 @@ def SrlvRxRy16: FRxRxRy16_ins<0b00110, "srlv", IIAlu>;
// Format: SRL rx, ry, sa MIPS16e
// Purpose: Shift Word Right Logical (Extended)
// To execute a logical right-shift of a word by a fixed
-// number of bits—1 to 31 bits.
+// number of bits-1 to 31 bits.
//
def SrlX16: FEXT_SHIFT16_ins<0b10, "srl", IIAlu>;
@@ -1195,7 +1281,8 @@ def SwRxRyOffMemX16:
// Purpose: Store Word rx (SP-Relative)
// To store an SP-relative word to memory.
//
-def SwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b11010, "sw", IIStore>, MayStore;
+def SwRxSpImmX16: FEXT_RI16_SP_Store_explicit_ins
+ <0b11010, "sw", IIStore>, MayStore;
//
//
@@ -1311,9 +1398,7 @@ def: Mips16Pat<(i32 addr16:$addr),
// Large (>16 bit) immediate loads
-def : Mips16Pat<(i32 imm:$imm),
- (OrRxRxRy16 (SllX16 (LiRxImmX16 (HI16 imm:$imm)), 16),
- (LiRxImmX16 (LO16 imm:$imm)))>;
+def : Mips16Pat<(i32 imm:$imm), (LwConstant32 imm:$imm, -1)>;
// Carry MipsPatterns
def : Mips16Pat<(subc CPU16Regs:$lhs, CPU16Regs:$rhs),
@@ -1354,7 +1439,7 @@ def: Mips16Pat
def: Mips16Pat
<(brcond (i32 (seteq CPU16Regs:$rx, 0)), bb:$targ16),
- (BeqzRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BeqzRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1416,7 +1501,7 @@ def: Mips16Pat
def: Mips16Pat
<(brcond (i32 (setne CPU16Regs:$rx, 0)), bb:$targ16),
- (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BnezRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1424,7 +1509,7 @@ def: Mips16Pat
//
def: Mips16Pat
<(brcond CPU16Regs:$rx, bb:$targ16),
- (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16)
+ (BnezRxImm16 CPU16Regs:$rx, bb:$targ16)
>;
//
@@ -1454,7 +1539,7 @@ def: Mips16Pat
// (BtnezT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16)
// >;
-def: UncondBranch16_pat<br, BimmX16>;
+def: UncondBranch16_pat<br, Bimm16>;
// Small immediates
def: Mips16Pat<(i32 immSExt16:$in),
@@ -1768,7 +1853,8 @@ def: Mips16Pat<(add CPU16Regs:$hi, (MipsLo tglobaladdr:$lo)),
(AddiuRxRxImmX16 CPU16Regs:$hi, tglobaladdr:$lo)>;
// hi/lo relocs
-
+def : Mips16Pat<(MipsHi tblockaddress:$in),
+ (SllX16 (LiRxImmX16 tblockaddress:$in), 16)>;
def : Mips16Pat<(MipsHi tglobaladdr:$in),
(SllX16 (LiRxImmX16 tglobaladdr:$in), 16)>;
def : Mips16Pat<(MipsHi tjumptable:$in),
@@ -1776,6 +1862,8 @@ def : Mips16Pat<(MipsHi tjumptable:$in),
def : Mips16Pat<(MipsHi tglobaltlsaddr:$in),
(SllX16 (LiRxImmX16 tglobaltlsaddr:$in), 16)>;
+def : Mips16Pat<(MipsLo tblockaddress:$in), (LiRxImmX16 tblockaddress:$in)>;
+
// wrapper_pic
class Wrapper16Pat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
Mips16Pat<(MipsWrapper RC:$gp, node:$in),
@@ -1789,3 +1877,33 @@ def : Mips16Pat<(i32 (extloadi8 addr16:$src)),
(LbuRxRyOffMemX16 addr16:$src)>;
def : Mips16Pat<(i32 (extloadi16 addr16:$src)),
(LhuRxRyOffMemX16 addr16:$src)>;
+
+def: Mips16Pat<(trap), (Break16)>;
+
+def : Mips16Pat<(sext_inreg CPU16Regs:$val, i8),
+ (SebRx16 CPU16Regs:$val)>;
+
+def : Mips16Pat<(sext_inreg CPU16Regs:$val, i16),
+ (SehRx16 CPU16Regs:$val)>;
+
+def GotPrologue16:
+ MipsPseudo16<
+ (outs CPU16Regs:$rh, CPU16Regs:$rl),
+ (ins simm16:$immHi, simm16:$immLo),
+ ".align 2\n\tli\t$rh, $immHi\n\taddiu\t$rl, $$pc, $immLo\n ",[]> ;
+
+// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
+def cpinst_operand : Operand<i32> {
+ // let PrintMethod = "printCPInstOperand";
+}
+
+// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in
+// the function. The first operand is the ID# for this instruction, the second
+// is the index into the MachineConstantPool that this is, the third is the
+// size in bytes of this constant pool entry.
+//
+let neverHasSideEffects = 1, isNotDuplicable = 1 in
+def CONSTPOOL_ENTRY :
+MipsPseudo16<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
+ i32imm:$size), "foo", []>;
+
diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp
index 7ad18f2b4d98..9d0f2c927e4d 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.cpp
+++ b/lib/Target/Mips/Mips16RegisterInfo.cpp
@@ -41,17 +41,16 @@
using namespace llvm;
-Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST,
- const Mips16InstrInfo &I)
- : MipsRegisterInfo(ST), TII(I) {}
+Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST)
+ : MipsRegisterInfo(ST) {}
bool Mips16RegisterInfo::requiresRegisterScavenging
(const MachineFunction &MF) const {
- return true;
+ return false;
}
bool Mips16RegisterInfo::requiresFrameIndexScavenging
(const MachineFunction &MF) const {
- return true;
+ return false;
}
bool Mips16RegisterInfo::useFPForScavengingIndex
@@ -66,6 +65,7 @@ bool Mips16RegisterInfo::saveScavengerRegister
const TargetRegisterClass *RC,
unsigned Reg) const {
DebugLoc DL;
+ const TargetInstrInfo &TII = *MBB.getParent()->getTarget().getInstrInfo();
TII.copyPhysReg(MBB, I, DL, Mips::T0, Reg, true);
TII.copyPhysReg(MBB, UseMI, DL, Reg, Mips::T0, true);
return true;
@@ -134,11 +134,14 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
- if (!MI.isDebugValue() && ( ((FrameReg != Mips::SP) && !isInt<16>(Offset)) ||
- ((FrameReg == Mips::SP) && !isInt<15>(Offset)) )) {
+ if (!MI.isDebugValue() &&
+ !Mips16InstrInfo::validImmediate(MI.getOpcode(), FrameReg, Offset)) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();
unsigned NewImm;
+ const Mips16InstrInfo &TII =
+ *static_cast<const Mips16InstrInfo*>(
+ MBB.getParent()->getTarget().getInstrInfo());
FrameReg = TII.loadImmediate(FrameReg, Offset, MBB, II, DL, NewImm);
Offset = SignExtend64<16>(NewImm);
IsKill = true;
diff --git a/lib/Target/Mips/Mips16RegisterInfo.h b/lib/Target/Mips/Mips16RegisterInfo.h
index 2b3d2b1a4ecb..13e82a3ffba9 100644
--- a/lib/Target/Mips/Mips16RegisterInfo.h
+++ b/lib/Target/Mips/Mips16RegisterInfo.h
@@ -20,10 +20,8 @@ namespace llvm {
class Mips16InstrInfo;
class Mips16RegisterInfo : public MipsRegisterInfo {
- const Mips16InstrInfo &TII;
public:
- Mips16RegisterInfo(const MipsSubtarget &Subtarget,
- const Mips16InstrInfo &TII);
+ Mips16RegisterInfo(const MipsSubtarget &Subtarget);
bool requiresRegisterScavenging(const MachineFunction &MF) const;
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index fc533fb03f63..15ef654555d6 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -15,9 +15,6 @@
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//
-// Instruction operand types
-def shamt_64 : Operand<i64>;
-
// Unsigned Operand
def uimm16_64 : Operand<i64> {
let PrintMethod = "printUnsignedImm";
@@ -34,42 +31,21 @@ def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
-let DecoderNamespace = "Mips64" in {
-
-multiclass Atomic2Ops64<PatFrag Op> {
- def NAME : Atomic2Ops<Op, CPU64Regs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Atomic2Ops<Op, CPU64Regs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass AtomicCmpSwap64<PatFrag Op> {
- def NAME : AtomicCmpSwap<Op, CPU64Regs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : AtomicCmpSwap<Op, CPU64Regs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let isCodeGenOnly = 1;
- }
-}
-}
-let usesCustomInserter = 1, Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64>;
- defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64>;
- defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64>;
- defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64>;
- defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64>;
- defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64>;
- defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64>;
- defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64>;
+let usesCustomInserter = 1 in {
+ def ATOMIC_LOAD_ADD_I64 : Atomic2Ops<atomic_load_add_64, GPR64>;
+ def ATOMIC_LOAD_SUB_I64 : Atomic2Ops<atomic_load_sub_64, GPR64>;
+ def ATOMIC_LOAD_AND_I64 : Atomic2Ops<atomic_load_and_64, GPR64>;
+ def ATOMIC_LOAD_OR_I64 : Atomic2Ops<atomic_load_or_64, GPR64>;
+ def ATOMIC_LOAD_XOR_I64 : Atomic2Ops<atomic_load_xor_64, GPR64>;
+ def ATOMIC_LOAD_NAND_I64 : Atomic2Ops<atomic_load_nand_64, GPR64>;
+ def ATOMIC_SWAP_I64 : Atomic2Ops<atomic_swap_64, GPR64>;
+ def ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap<atomic_cmp_swap_64, GPR64>;
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1 in {
- defm LOAD_AC128 : LoadM<"load_ac128", ACRegs128>;
- defm STORE_AC128 : StoreM<"store_ac128", ACRegs128>;
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def LOAD_ACC128 : Load<"", ACC128>;
+ def STORE_ACC128 : Store<"", ACC128>;
}
//===----------------------------------------------------------------------===//
@@ -77,166 +53,174 @@ let isPseudo = 1 in {
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
/// Arithmetic Instructions (ALU Immediate)
-def DADDi : ArithLogicI<"daddi", simm16_64, CPU64RegsOpnd>, ADDI_FM<0x18>;
-def DADDiu : ArithLogicI<"daddiu", simm16_64, CPU64RegsOpnd, immSExt16, add>,
+def DADDi : ArithLogicI<"daddi", simm16_64, GPR64Opnd>, ADDI_FM<0x18>;
+def DADDiu : ArithLogicI<"daddiu", simm16_64, GPR64Opnd, IIArith,
+ immSExt16, add>,
ADDI_FM<0x19>, IsAsCheapAsAMove;
-def DANDi : ArithLogicI<"andi", uimm16_64, CPU64RegsOpnd, immZExt16, and>,
- ADDI_FM<0xc>;
-def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, CPU64Regs>,
+
+let isCodeGenOnly = 1 in {
+def SLTi64 : SetCC_I<"slti", setlt, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xa>;
-def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, CPU64Regs>,
+def SLTiu64 : SetCC_I<"sltiu", setult, simm16_64, immSExt16, GPR64Opnd>,
SLTI_FM<0xb>;
-def ORi64 : ArithLogicI<"ori", uimm16_64, CPU64RegsOpnd, immZExt16, or>,
+def ANDi64 : ArithLogicI<"andi", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ and>,
+ ADDI_FM<0xc>;
+def ORi64 : ArithLogicI<"ori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ or>,
ADDI_FM<0xd>;
-def XORi64 : ArithLogicI<"xori", uimm16_64, CPU64RegsOpnd, immZExt16, xor>,
+def XORi64 : ArithLogicI<"xori", uimm16_64, GPR64Opnd, IILogic, immZExt16,
+ xor>,
ADDI_FM<0xe>;
-def LUi64 : LoadUpper<"lui", CPU64Regs, uimm16_64>, LUI_FM;
+def LUi64 : LoadUpper<"lui", GPR64Opnd, uimm16_64>, LUI_FM;
+}
/// Arithmetic Instructions (3-Operand, R-Type)
-def DADD : ArithLogicR<"dadd", CPU64RegsOpnd>, ADD_FM<0, 0x2c>;
-def DADDu : ArithLogicR<"daddu", CPU64RegsOpnd, 1, IIAlu, add>,
+def DADD : ArithLogicR<"dadd", GPR64Opnd>, ADD_FM<0, 0x2c>;
+def DADDu : ArithLogicR<"daddu", GPR64Opnd, 1, IIArith, add>,
ADD_FM<0, 0x2d>;
-def DSUBu : ArithLogicR<"dsubu", CPU64RegsOpnd, 0, IIAlu, sub>,
+def DSUBu : ArithLogicR<"dsubu", GPR64Opnd, 0, IIArith, sub>,
ADD_FM<0, 0x2f>;
-def SLT64 : SetCC_R<"slt", setlt, CPU64Regs>, ADD_FM<0, 0x2a>;
-def SLTu64 : SetCC_R<"sltu", setult, CPU64Regs>, ADD_FM<0, 0x2b>;
-def AND64 : ArithLogicR<"and", CPU64RegsOpnd, 1, IIAlu, and>, ADD_FM<0, 0x24>;
-def OR64 : ArithLogicR<"or", CPU64RegsOpnd, 1, IIAlu, or>, ADD_FM<0, 0x25>;
-def XOR64 : ArithLogicR<"xor", CPU64RegsOpnd, 1, IIAlu, xor>, ADD_FM<0, 0x26>;
-def NOR64 : LogicNOR<"nor", CPU64RegsOpnd>, ADD_FM<0, 0x27>;
+
+let isCodeGenOnly = 1 in {
+def SLT64 : SetCC_R<"slt", setlt, GPR64Opnd>, ADD_FM<0, 0x2a>;
+def SLTu64 : SetCC_R<"sltu", setult, GPR64Opnd>, ADD_FM<0, 0x2b>;
+def AND64 : ArithLogicR<"and", GPR64Opnd, 1, IIArith, and>, ADD_FM<0, 0x24>;
+def OR64 : ArithLogicR<"or", GPR64Opnd, 1, IIArith, or>, ADD_FM<0, 0x25>;
+def XOR64 : ArithLogicR<"xor", GPR64Opnd, 1, IIArith, xor>, ADD_FM<0, 0x26>;
+def NOR64 : LogicNOR<"nor", GPR64Opnd>, ADD_FM<0, 0x27>;
+}
/// Shift Instructions
-def DSLL : shift_rotate_imm<"dsll", shamt, CPU64RegsOpnd, shl, immZExt6>,
+def DSLL : shift_rotate_imm<"dsll", uimm6, GPR64Opnd, shl, immZExt6>,
SRA_FM<0x38, 0>;
-def DSRL : shift_rotate_imm<"dsrl", shamt, CPU64RegsOpnd, srl, immZExt6>,
+def DSRL : shift_rotate_imm<"dsrl", uimm6, GPR64Opnd, srl, immZExt6>,
SRA_FM<0x3a, 0>;
-def DSRA : shift_rotate_imm<"dsra", shamt, CPU64RegsOpnd, sra, immZExt6>,
+def DSRA : shift_rotate_imm<"dsra", uimm6, GPR64Opnd, sra, immZExt6>,
SRA_FM<0x3b, 0>;
-def DSLLV : shift_rotate_reg<"dsllv", CPU64RegsOpnd, shl>, SRLV_FM<0x14, 0>;
-def DSRLV : shift_rotate_reg<"dsrlv", CPU64RegsOpnd, srl>, SRLV_FM<0x16, 0>;
-def DSRAV : shift_rotate_reg<"dsrav", CPU64RegsOpnd, sra>, SRLV_FM<0x17, 0>;
-def DSLL32 : shift_rotate_imm<"dsll32", shamt, CPU64RegsOpnd>, SRA_FM<0x3c, 0>;
-def DSRL32 : shift_rotate_imm<"dsrl32", shamt, CPU64RegsOpnd>, SRA_FM<0x3e, 0>;
-def DSRA32 : shift_rotate_imm<"dsra32", shamt, CPU64RegsOpnd>, SRA_FM<0x3f, 0>;
-}
+def DSLLV : shift_rotate_reg<"dsllv", GPR64Opnd, shl>, SRLV_FM<0x14, 0>;
+def DSRLV : shift_rotate_reg<"dsrlv", GPR64Opnd, srl>, SRLV_FM<0x16, 0>;
+def DSRAV : shift_rotate_reg<"dsrav", GPR64Opnd, sra>, SRLV_FM<0x17, 0>;
+def DSLL32 : shift_rotate_imm<"dsll32", uimm5, GPR64Opnd>, SRA_FM<0x3c, 0>;
+def DSRL32 : shift_rotate_imm<"dsrl32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 0>;
+def DSRA32 : shift_rotate_imm<"dsra32", uimm5, GPR64Opnd>, SRA_FM<0x3f, 0>;
+
// Rotate Instructions
-let Predicates = [HasMips64r2, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def DROTR : shift_rotate_imm<"drotr", shamt, CPU64RegsOpnd, rotr, immZExt6>,
+let Predicates = [HasMips64r2, HasStdEnc] in {
+ def DROTR : shift_rotate_imm<"drotr", uimm6, GPR64Opnd, rotr, immZExt6>,
SRA_FM<0x3a, 1>;
- def DROTRV : shift_rotate_reg<"drotrv", CPU64RegsOpnd, rotr>,
+ def DROTRV : shift_rotate_reg<"drotrv", GPR64Opnd, rotr>,
SRLV_FM<0x16, 1>;
+ def DROTR32 : shift_rotate_imm<"drotr32", uimm5, GPR64Opnd>, SRA_FM<0x3e, 1>;
}
-let DecoderNamespace = "Mips64" in {
/// Load and Store Instructions
/// aligned
-defm LB64 : LoadM<"lb", CPU64Regs, sextloadi8>, LW_FM<0x20>;
-defm LBu64 : LoadM<"lbu", CPU64Regs, zextloadi8>, LW_FM<0x24>;
-defm LH64 : LoadM<"lh", CPU64Regs, sextloadi16>, LW_FM<0x21>;
-defm LHu64 : LoadM<"lhu", CPU64Regs, zextloadi16>, LW_FM<0x25>;
-defm LW64 : LoadM<"lw", CPU64Regs, sextloadi32>, LW_FM<0x23>;
-defm LWu64 : LoadM<"lwu", CPU64Regs, zextloadi32>, LW_FM<0x27>;
-defm SB64 : StoreM<"sb", CPU64Regs, truncstorei8>, LW_FM<0x28>;
-defm SH64 : StoreM<"sh", CPU64Regs, truncstorei16>, LW_FM<0x29>;
-defm SW64 : StoreM<"sw", CPU64Regs, truncstorei32>, LW_FM<0x2b>;
-defm LD : LoadM<"ld", CPU64Regs, load>, LW_FM<0x37>;
-defm SD : StoreM<"sd", CPU64Regs, store>, LW_FM<0x3f>;
+let isCodeGenOnly = 1 in {
+def LB64 : Load<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>;
+def LBu64 : Load<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>;
+def LH64 : Load<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>;
+def LHu64 : Load<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>;
+def LW64 : Load<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>;
+def SB64 : Store<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>;
+def SH64 : Store<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>;
+def SW64 : Store<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>;
+}
+
+def LWu : Load<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>;
+def LD : Load<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>;
+def SD : Store<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>;
/// load/store left/right
-defm LWL64 : LoadLeftRightM<"lwl", MipsLWL, CPU64Regs>, LW_FM<0x22>;
-defm LWR64 : LoadLeftRightM<"lwr", MipsLWR, CPU64Regs>, LW_FM<0x26>;
-defm SWL64 : StoreLeftRightM<"swl", MipsSWL, CPU64Regs>, LW_FM<0x2a>;
-defm SWR64 : StoreLeftRightM<"swr", MipsSWR, CPU64Regs>, LW_FM<0x2e>;
+let isCodeGenOnly = 1 in {
+def LWL64 : LoadLeftRight<"lwl", MipsLWL, GPR64Opnd, IILoad>, LW_FM<0x22>;
+def LWR64 : LoadLeftRight<"lwr", MipsLWR, GPR64Opnd, IILoad>, LW_FM<0x26>;
+def SWL64 : StoreLeftRight<"swl", MipsSWL, GPR64Opnd, IIStore>, LW_FM<0x2a>;
+def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd, IIStore>, LW_FM<0x2e>;
+}
-defm LDL : LoadLeftRightM<"ldl", MipsLDL, CPU64Regs>, LW_FM<0x1a>;
-defm LDR : LoadLeftRightM<"ldr", MipsLDR, CPU64Regs>, LW_FM<0x1b>;
-defm SDL : StoreLeftRightM<"sdl", MipsSDL, CPU64Regs>, LW_FM<0x2c>;
-defm SDR : StoreLeftRightM<"sdr", MipsSDR, CPU64Regs>, LW_FM<0x2d>;
+def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd, IILoad>, LW_FM<0x1a>;
+def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd, IILoad>, LW_FM<0x1b>;
+def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd, IIStore>, LW_FM<0x2c>;
+def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd, IIStore>, LW_FM<0x2d>;
/// Load-linked, Store-conditional
-let Predicates = [NotN64, HasStdEnc] in {
- def LLD : LLBase<"lld", CPU64RegsOpnd, mem>, LW_FM<0x34>;
- def SCD : SCBase<"scd", CPU64RegsOpnd, mem>, LW_FM<0x3c>;
-}
-
-let Predicates = [IsN64, HasStdEnc], isCodeGenOnly = 1 in {
- def LLD_P8 : LLBase<"lld", CPU64RegsOpnd, mem64>, LW_FM<0x34>;
- def SCD_P8 : SCBase<"scd", CPU64RegsOpnd, mem64>, LW_FM<0x3c>;
-}
+def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>;
+def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>;
/// Jump and Branch Instructions
-def JR64 : IndirectBranch<CPU64Regs>, MTLO_FM<8>;
-def BEQ64 : CBranch<"beq", seteq, CPU64Regs>, BEQ_FM<4>;
-def BNE64 : CBranch<"bne", setne, CPU64Regs>, BEQ_FM<5>;
-def BGEZ64 : CBranchZero<"bgez", setge, CPU64Regs>, BGEZ_FM<1, 1>;
-def BGTZ64 : CBranchZero<"bgtz", setgt, CPU64Regs>, BGEZ_FM<7, 0>;
-def BLEZ64 : CBranchZero<"blez", setle, CPU64Regs>, BGEZ_FM<6, 0>;
-def BLTZ64 : CBranchZero<"bltz", setlt, CPU64Regs>, BGEZ_FM<1, 0>;
+let isCodeGenOnly = 1 in {
+def JR64 : IndirectBranch<"jr", GPR64Opnd>, MTLO_FM<8>;
+def BEQ64 : CBranch<"beq", brtarget, seteq, GPR64Opnd>, BEQ_FM<4>;
+def BNE64 : CBranch<"bne", brtarget, setne, GPR64Opnd>, BEQ_FM<5>;
+def BGEZ64 : CBranchZero<"bgez", brtarget, setge, GPR64Opnd>, BGEZ_FM<1, 1>;
+def BGTZ64 : CBranchZero<"bgtz", brtarget, setgt, GPR64Opnd>, BGEZ_FM<7, 0>;
+def BLEZ64 : CBranchZero<"blez", brtarget, setle, GPR64Opnd>, BGEZ_FM<6, 0>;
+def BLTZ64 : CBranchZero<"bltz", brtarget, setlt, GPR64Opnd>, BGEZ_FM<1, 0>;
+def JALR64 : JumpLinkReg<"jalr", GPR64Opnd>, JALR_FM;
+def JALR64Pseudo : JumpLinkRegPseudo<GPR64Opnd, JALR, RA, GPR32Opnd>;
+def TAILCALL64_R : JumpFR<"tcallr", GPR64Opnd, MipsTailCall>,
+ MTLO_FM<8>, IsTailCall;
}
-let DecoderNamespace = "Mips64" in
-def JALR64 : JumpLinkReg<"jalr", CPU64Regs>, JALR_FM;
-def JALR64Pseudo : JumpLinkRegPseudo<CPU64Regs, JALR64, RA_64>;
-def TAILCALL64_R : JumpFR<CPU64Regs, MipsTailCall>, MTLO_FM<8>, IsTailCall;
-let DecoderNamespace = "Mips64" in {
/// Multiply and Divide Instructions.
-def DMULT : Mult<"dmult", IIImul, CPU64RegsOpnd, [HI64, LO64]>,
+def DMULT : Mult<"dmult", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
MULT_FM<0, 0x1c>;
-def DMULTu : Mult<"dmultu", IIImul, CPU64RegsOpnd, [HI64, LO64]>,
+def DMULTu : Mult<"dmultu", IIImult, GPR64Opnd, [HI0_64, LO0_64]>,
MULT_FM<0, 0x1d>;
-def PseudoDMULT : MultDivPseudo<DMULT, ACRegs128, CPU64RegsOpnd, MipsMult,
- IIImul>;
-def PseudoDMULTu : MultDivPseudo<DMULTu, ACRegs128, CPU64RegsOpnd, MipsMultu,
- IIImul>;
-def DSDIV : Div<"ddiv", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1e>;
-def DUDIV : Div<"ddivu", IIIdiv, CPU64RegsOpnd, [HI64, LO64]>, MULT_FM<0, 0x1f>;
-def PseudoDSDIV : MultDivPseudo<DSDIV, ACRegs128, CPU64RegsOpnd, MipsDivRem,
- IIIdiv, 0>;
-def PseudoDUDIV : MultDivPseudo<DUDIV, ACRegs128, CPU64RegsOpnd, MipsDivRemU,
- IIIdiv, 0>;
-
-def MTHI64 : MoveToLOHI<"mthi", CPU64Regs, [HI64]>, MTLO_FM<0x11>;
-def MTLO64 : MoveToLOHI<"mtlo", CPU64Regs, [LO64]>, MTLO_FM<0x13>;
-def MFHI64 : MoveFromLOHI<"mfhi", CPU64Regs, [HI64]>, MFLO_FM<0x10>;
-def MFLO64 : MoveFromLOHI<"mflo", CPU64Regs, [LO64]>, MFLO_FM<0x12>;
+def PseudoDMULT : MultDivPseudo<DMULT, ACC128, GPR64Opnd, MipsMult,
+ IIImult>;
+def PseudoDMULTu : MultDivPseudo<DMULTu, ACC128, GPR64Opnd, MipsMultu,
+ IIImult>;
+def DSDIV : Div<"ddiv", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1e>;
+def DUDIV : Div<"ddivu", IIIdiv, GPR64Opnd, [HI0_64, LO0_64]>, MULT_FM<0, 0x1f>;
+def PseudoDSDIV : MultDivPseudo<DSDIV, ACC128, GPR64Opnd, MipsDivRem,
+ IIIdiv, 0, 1, 1>;
+def PseudoDUDIV : MultDivPseudo<DUDIV, ACC128, GPR64Opnd, MipsDivRemU,
+ IIIdiv, 0, 1, 1>;
+
+let isCodeGenOnly = 1 in {
+def MTHI64 : MoveToLOHI<"mthi", GPR64Opnd, [HI0_64]>, MTLO_FM<0x11>;
+def MTLO64 : MoveToLOHI<"mtlo", GPR64Opnd, [LO0_64]>, MTLO_FM<0x13>;
+def MFHI64 : MoveFromLOHI<"mfhi", GPR64Opnd, AC0_64>, MFLO_FM<0x10>;
+def MFLO64 : MoveFromLOHI<"mflo", GPR64Opnd, AC0_64>, MFLO_FM<0x12>;
+def PseudoMFHI64 : PseudoMFLOHI<GPR64, ACC128, MipsMFHI>;
+def PseudoMFLO64 : PseudoMFLOHI<GPR64, ACC128, MipsMFLO>;
+def PseudoMTLOHI64 : PseudoMTLOHI<ACC128, GPR64>;
/// Sign Ext In Register Instructions.
-def SEB64 : SignExtInReg<"seb", i8, CPU64Regs>, SEB_FM<0x10, 0x20>;
-def SEH64 : SignExtInReg<"seh", i16, CPU64Regs>, SEB_FM<0x18, 0x20>;
+def SEB64 : SignExtInReg<"seb", i8, GPR64Opnd>, SEB_FM<0x10, 0x20>;
+def SEH64 : SignExtInReg<"seh", i16, GPR64Opnd>, SEB_FM<0x18, 0x20>;
+}
/// Count Leading
-def DCLZ : CountLeading0<"dclz", CPU64RegsOpnd>, CLO_FM<0x24>;
-def DCLO : CountLeading1<"dclo", CPU64RegsOpnd>, CLO_FM<0x25>;
+def DCLZ : CountLeading0<"dclz", GPR64Opnd>, CLO_FM<0x24>;
+def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>;
/// Double Word Swap Bytes/HalfWords
-def DSBH : SubwordSwap<"dsbh", CPU64RegsOpnd>, SEB_FM<2, 0x24>;
-def DSHD : SubwordSwap<"dshd", CPU64RegsOpnd>, SEB_FM<5, 0x24>;
+def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>;
+def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>;
-def LEA_ADDiu64 : EffectiveAddress<"daddiu", CPU64Regs, mem_ea_64>, LW_FM<0x19>;
+def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd>, LW_FM<0x19>;
-}
-let DecoderNamespace = "Mips64" in {
-def RDHWR64 : ReadHardware<CPU64Regs, HW64RegsOpnd>, RDHWR_FM;
+let isCodeGenOnly = 1 in
+def RDHWR64 : ReadHardware<GPR64Opnd, HWRegsOpnd>, RDHWR_FM;
-def DEXT : ExtBase<"dext", CPU64RegsOpnd>, EXT_FM<3>;
-let Pattern = []<dag> in {
- def DEXTU : ExtBase<"dextu", CPU64RegsOpnd>, EXT_FM<2>;
- def DEXTM : ExtBase<"dextm", CPU64RegsOpnd>, EXT_FM<1>;
-}
-def DINS : InsBase<"dins", CPU64RegsOpnd>, EXT_FM<7>;
-let Pattern = []<dag> in {
- def DINSU : InsBase<"dinsu", CPU64RegsOpnd>, EXT_FM<6>;
- def DINSM : InsBase<"dinsm", CPU64RegsOpnd>, EXT_FM<5>;
-}
+def DEXT : ExtBase<"dext", GPR64Opnd, uimm6, MipsExt>, EXT_FM<3>;
+def DEXTU : ExtBase<"dextu", GPR64Opnd, uimm6>, EXT_FM<2>;
+def DEXTM : ExtBase<"dextm", GPR64Opnd, uimm5>, EXT_FM<1>;
+
+def DINS : InsBase<"dins", GPR64Opnd, uimm6, MipsIns>, EXT_FM<7>;
+def DINSU : InsBase<"dinsu", GPR64Opnd, uimm6>, EXT_FM<6>;
+def DINSM : InsBase<"dinsm", GPR64Opnd, uimm5>, EXT_FM<5>;
let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
- def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "dsll\t$rd, $rt, 32", [], IIAlu>;
- def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
- def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt),
- "sll\t$rd, $rt, 0", [], IIAlu>;
+ def DSLL64_32 : FR<0x00, 0x3c, (outs GPR64:$rd), (ins GPR32:$rt),
+ "dsll\t$rd, $rt, 32", [], IIArith>;
+ def SLL64_32 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR32:$rt),
+ "sll\t$rd, $rt, 0", [], IIArith>;
+ def SLL64_64 : FR<0x0, 0x00, (outs GPR64:$rd), (ins GPR64:$rt),
+ "sll\t$rd, $rt, 0", [], IIArith>;
}
}
//===----------------------------------------------------------------------===//
@@ -244,18 +228,12 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
//===----------------------------------------------------------------------===//
// extended loads
-let Predicates = [NotN64, HasStdEnc] in {
+let Predicates = [HasStdEnc] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64_P8 addr:$src)>;
- def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64_P8 addr:$src)>;
-}
// hi/lo relocs
def : MipsPat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;
@@ -273,118 +251,80 @@ def : MipsPat<(MipsLo tglobaltlsaddr:$in),
(DADDiu ZERO_64, tglobaltlsaddr:$in)>;
def : MipsPat<(MipsLo texternalsym:$in), (DADDiu ZERO_64, texternalsym:$in)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)),
- (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)),
- (DADDiu CPU64Regs:$hi, tjumptable:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)),
- (DADDiu CPU64Regs:$hi, tconstpool:$lo)>;
-def : MipsPat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (DADDiu CPU64Regs:$hi, tglobaltlsaddr:$lo)>;
-
-def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>;
-def : WrapperPat<tconstpool, DADDiu, CPU64Regs>;
-def : WrapperPat<texternalsym, DADDiu, CPU64Regs>;
-def : WrapperPat<tblockaddress, DADDiu, CPU64Regs>;
-def : WrapperPat<tjumptable, DADDiu, CPU64Regs>;
-def : WrapperPat<tglobaltlsaddr, DADDiu, CPU64Regs>;
-
-defm : BrcondPats<CPU64Regs, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaladdr:$lo)),
+ (DADDiu GPR64:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tblockaddress:$lo)),
+ (DADDiu GPR64:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tjumptable:$lo)),
+ (DADDiu GPR64:$hi, tjumptable:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tconstpool:$lo)),
+ (DADDiu GPR64:$hi, tconstpool:$lo)>;
+def : MipsPat<(add GPR64:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (DADDiu GPR64:$hi, tglobaltlsaddr:$lo)>;
+
+def : WrapperPat<tglobaladdr, DADDiu, GPR64>;
+def : WrapperPat<tconstpool, DADDiu, GPR64>;
+def : WrapperPat<texternalsym, DADDiu, GPR64>;
+def : WrapperPat<tblockaddress, DADDiu, GPR64>;
+def : WrapperPat<tjumptable, DADDiu, GPR64>;
+def : WrapperPat<tglobaltlsaddr, DADDiu, GPR64>;
+
+defm : BrcondPats<GPR64, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64,
ZERO_64>;
+def : MipsPat<(brcond (i32 (setlt i64:$lhs, 1)), bb:$dst),
+ (BLEZ64 i64:$lhs, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt i64:$lhs, -1)), bb:$dst),
+ (BGEZ64 i64:$lhs, bb:$dst)>;
+
// setcc patterns
-defm : SeteqPats<CPU64Regs, SLTiu64, XOR64, SLTu64, ZERO_64>;
-defm : SetlePats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgtPats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgePats<CPU64Regs, SLT64, SLTu64>;
-defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>;
+defm : SeteqPats<GPR64, SLTiu64, XOR64, SLTu64, ZERO_64>;
+defm : SetlePats<GPR64, SLT64, SLTu64>;
+defm : SetgtPats<GPR64, SLT64, SLTu64>;
+defm : SetgePats<GPR64, SLT64, SLTu64>;
+defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>;
// truncate
-def : MipsPat<(i32 (trunc CPU64Regs:$src)),
- (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>,
- Requires<[IsN64, HasStdEnc]>;
+def : MipsPat<(i32 (trunc GPR64:$src)),
+ (SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>,
+ Requires<[HasStdEnc]>;
// 32-to-64-bit extension
-def : MipsPat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
-def : MipsPat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;
-def : MipsPat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
+def : MipsPat<(i64 (anyext GPR32:$src)), (SLL64_32 GPR32:$src)>;
+def : MipsPat<(i64 (zext GPR32:$src)), (DSRL (DSLL64_32 GPR32:$src), 32)>;
+def : MipsPat<(i64 (sext GPR32:$src)), (SLL64_32 GPR32:$src)>;
// Sign extend in register
-def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)),
- (SLL64_64 CPU64Regs:$src)>;
+def : MipsPat<(i64 (sext_inreg GPR64:$src, i32)),
+ (SLL64_64 GPR64:$src)>;
// bswap MipsPattern
-def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>;
-
-// mflo/hi patterns.
-def : MipsPat<(i64 (ExtractLOHI ACRegs128:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegs128:$ac, imm:$lohi_idx)>;
+def : MipsPat<(bswap GPR64:$rt), (DSHD (DSBH GPR64:$rt))>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
def : InstAlias<"move $dst, $src",
- (DADDu CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>,
+ (DADDu GPR64Opnd:$dst, GPR64Opnd:$src, ZERO_64), 1>,
Requires<[HasMips64]>;
-def : InstAlias<"move $dst, $src",
- (OR64 CPU64RegsOpnd:$dst, CPU64RegsOpnd:$src, ZERO_64), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"and $rs, $rt, $imm",
- (DANDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"slt $rs, $rt, $imm",
- (SLTi64 CPURegsOpnd:$rs, CPU64Regs:$rt, simm16_64:$imm), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"xor $rs, $rt, $imm",
- (XORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"not $rt, $rs",
- (NOR64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rs, ZERO_64), 1>,
- Requires<[HasMips64]>;
-def : InstAlias<"j $rs", (JR64 CPU64Regs:$rs), 0>, Requires<[HasMips64]>;
-def : InstAlias<"jalr $rs", (JALR64 RA_64, CPU64Regs:$rs)>,
- Requires<[HasMips64]>;
-def : InstAlias<"jal $rs", (JALR64 RA_64, CPU64Regs:$rs), 0>,
- Requires<[HasMips64]>;
-def : InstAlias<"jal $rd,$rs", (JALR64 CPU64Regs:$rd, CPU64Regs:$rs), 0>,
- Requires<[HasMips64]>;
def : InstAlias<"daddu $rs, $rt, $imm",
- (DADDiu CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm),
- 1>;
+ (DADDiu GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>;
def : InstAlias<"dadd $rs, $rt, $imm",
- (DADDi CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, simm16_64:$imm),
- 1>;
-def : InstAlias<"or $rs, $rt, $imm",
- (ORi64 CPU64RegsOpnd:$rs, CPU64RegsOpnd:$rt, uimm16_64:$imm),
- 1>, Requires<[HasMips64]>;
-/// Move between CPU and coprocessor registers
+ (DADDi GPR64Opnd:$rs, GPR64Opnd:$rt, simm16_64:$imm),
+ 0>;
-let DecoderNamespace = "Mips64" in {
-def DMFC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt),
- (ins CPU64RegsOpnd:$rd, uimm16:$sel),
- "dmfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 1>;
-def DMTC0_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel),
- (ins CPU64RegsOpnd:$rt),
- "dmtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 5>;
-def DMFC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rt),
- (ins CPU64RegsOpnd:$rd, uimm16:$sel),
- "dmfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 1>;
-def DMTC2_3OP64 : MFC3OP<(outs CPU64RegsOpnd:$rd, uimm16:$sel),
- (ins CPU64RegsOpnd:$rt),
- "dmtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 5>;
+/// Move between CPU and coprocessor registers
+let DecoderNamespace = "Mips64", Predicates = [HasMips64] in {
+def DMFC0 : MFC3OP<"dmfc0", GPR64Opnd>, MFC3OP_FM<0x10, 1>;
+def DMTC0 : MFC3OP<"dmtc0", GPR64Opnd>, MFC3OP_FM<0x10, 5>;
+def DMFC2 : MFC3OP<"dmfc2", GPR64Opnd>, MFC3OP_FM<0x12, 1>;
+def DMTC2 : MFC3OP<"dmtc2", GPR64Opnd>, MFC3OP_FM<0x12, 5>;
}
// Two operand (implicit 0 selector) versions:
-def : InstAlias<"dmfc0 $rt, $rd",
- (DMFC0_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>;
-def : InstAlias<"dmtc0 $rt, $rd",
- (DMTC0_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>;
-def : InstAlias<"dmfc2 $rt, $rd",
- (DMFC2_3OP64 CPU64RegsOpnd:$rt, CPU64RegsOpnd:$rd, 0), 0>;
-def : InstAlias<"dmtc2 $rt, $rd",
- (DMTC2_3OP64 CPU64RegsOpnd:$rd, 0, CPU64RegsOpnd:$rt), 0>;
+def : InstAlias<"dmfc0 $rt, $rd", (DMFC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmtc0 $rt, $rd", (DMTC0 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmfc2 $rt, $rd", (DMFC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
+def : InstAlias<"dmtc2 $rt, $rd", (DMTC2 GPR64Opnd:$rt, GPR64Opnd:$rd, 0), 0>;
diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/lib/Target/Mips/MipsAnalyzeImmediate.cpp
index 99b163ec33ac..31a9b7d63983 100644
--- a/lib/Target/Mips/MipsAnalyzeImmediate.cpp
+++ b/lib/Target/Mips/MipsAnalyzeImmediate.cpp
@@ -40,7 +40,7 @@ void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize,
void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize,
InstSeqLs &SeqLs) {
- unsigned Shamt = CountTrailingZeros_64(Imm);
+ unsigned Shamt = countTrailingZeros(Imm);
GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs);
AddInstr(SeqLs, Inst(SLL, Shamt));
}
diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.h b/lib/Target/Mips/MipsAnalyzeImmediate.h
index a094ddae45de..cc09034a9c39 100644
--- a/lib/Target/Mips/MipsAnalyzeImmediate.h
+++ b/lib/Target/Mips/MipsAnalyzeImmediate.h
@@ -22,7 +22,7 @@ namespace llvm {
};
typedef SmallVector<Inst, 7 > InstSeq;
- /// Analyze - Get an instrucion sequence to load immediate Imm. The last
+ /// Analyze - Get an instruction sequence to load immediate Imm. The last
/// instruction in the sequence must be an ADDiu if LastInstrIsADDiu is
/// true;
const InstSeq &Analyze(uint64_t Imm, unsigned Size, bool LastInstrIsADDiu);
@@ -32,19 +32,19 @@ namespace llvm {
/// AddInstr - Add I to all instruction sequences in SeqLs.
void AddInstr(InstSeqLs &SeqLs, const Inst &I);
- /// GetInstSeqLsADDiu - Get instrucion sequences which end with an ADDiu to
+ /// GetInstSeqLsADDiu - Get instruction sequences which end with an ADDiu to
/// load immediate Imm
void GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLsORi - Get instrucion sequences which end with an ORi to
+ /// GetInstSeqLsORi - Get instruction sequences which end with an ORi to
/// load immediate Imm
void GetInstSeqLsORi(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLsSLL - Get instrucion sequences which end with a SLL to
+ /// GetInstSeqLsSLL - Get instruction sequences which end with a SLL to
/// load immediate Imm
void GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
- /// GetInstSeqLs - Get instrucion sequences to load immediate Imm.
+ /// GetInstSeqLs - Get instruction sequences to load immediate Imm.
void GetInstSeqLs(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs);
/// ReplaceADDiuSLLWithLUi - Replace an ADDiu & SLL pair with a LUi.
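The hunk above only renames CountTrailingZeros_64 to countTrailingZeros; the underlying idea of GetInstSeqLsSLL is to strip the trailing zero bits of the immediate, materialize the remaining value, and finish with a single SLL by that amount. A minimal standalone C++ sketch of that step (assumed behavior, not the LLVM code):

```cpp
// Strip trailing zeros, load the rest, then shift back: the GetInstSeqLsSLL idea.
#include <cstdint>
#include <cstdio>

static unsigned countTrailingZeros64(uint64_t V) {
  unsigned N = 0;
  while (V && !(V & 1)) { V >>= 1; ++N; }
  return N;
}

int main() {
  uint64_t Imm = 0x12340000;            // example immediate with trailing zeros
  unsigned Shamt = countTrailingZeros64(Imm);
  uint64_t Rest = Imm >> Shamt;         // value to materialize before the shift
  std::printf("materialize 0x%llx, then sll by %u\n",
              (unsigned long long)Rest, Shamt);
  return 0;
}
```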
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 6e4feda4f531..45c439826422 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -15,11 +15,11 @@
#define DEBUG_TYPE "mips-asm-printer"
#include "InstPrinter/MipsInstPrinter.h"
#include "MCTargetDesc/MipsBaseInfo.h"
-#include "MCTargetDesc/MipsELFStreamer.h"
#include "Mips.h"
#include "MipsAsmPrinter.h"
#include "MipsInstrInfo.h"
#include "MipsMCInstLower.h"
+#include "MipsTargetStreamer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
@@ -33,8 +33,8 @@
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/TargetRegistry.h"
@@ -45,12 +45,17 @@
using namespace llvm;
+MipsTargetStreamer &MipsAsmPrinter::getTargetStreamer() {
+ return static_cast<MipsTargetStreamer &>(OutStreamer.getTargetStreamer());
+}
+
bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Initialize TargetLoweringObjectFile.
if (Subtarget->allowMixed16_32())
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
MipsFI = MF.getInfo<MipsFunctionInfo>();
+ MCP = MF.getConstantPool();
AsmPrinter::runOnMachineFunction(MF);
return true;
}
@@ -71,6 +76,39 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
return;
}
+ // If we just ended a constant pool, mark it as such.
+ if (InConstantPool && MI->getOpcode() != Mips::CONSTPOOL_ENTRY) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
+ InConstantPool = false;
+ }
+ if (MI->getOpcode() == Mips::CONSTPOOL_ENTRY) {
+ // CONSTPOOL_ENTRY - This instruction represents a floating
+ // constant pool in the function. The first operand is the ID#
+ // for this instruction, the second is the index into the
+ // MachineConstantPool that this is, the third is the size in
+ // bytes of this constant pool entry.
+ // The required alignment is specified on the basic block holding this MI.
+ //
+ unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
+ unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
+
+ // If this is the first entry of the pool, mark it.
+ if (!InConstantPool) {
+ OutStreamer.EmitDataRegion(MCDR_DataRegion);
+ InConstantPool = true;
+ }
+
+ OutStreamer.EmitLabel(GetCPISymbol(LabelId));
+
+ const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx];
+ if (MCPE.isMachineConstantPoolEntry())
+ EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
+ else
+ EmitGlobalConstant(MCPE.Val.ConstVal);
+ return;
+ }
+
+
MachineBasicBlock::const_instr_iterator I = MI;
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
@@ -141,7 +179,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
// size of stack area to which FP callee-saved regs are saved.
- unsigned CPURegSize = Mips::CPURegsRegClass.getSize();
+ unsigned CPURegSize = Mips::GPR32RegClass.getSize();
unsigned FGR32RegSize = Mips::FGR32RegClass.getSize();
unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize();
bool HasAFGR64Reg = false;
@@ -151,7 +189,7 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
// Set FPU Bitmask.
for (i = 0; i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- if (Mips::CPURegsRegClass.contains(Reg))
+ if (Mips::GPR32RegClass.contains(Reg))
break;
unsigned RegNum = TM.getRegisterInfo()->getEncodingValue(Reg);
@@ -238,16 +276,15 @@ void MipsAsmPrinter::EmitFunctionEntryLabel() {
}
if (Subtarget->inMicroMipsMode())
- if (MipsELFStreamer *MES = dyn_cast<MipsELFStreamer>(&OutStreamer))
- MES->emitMipsSTOCG(*Subtarget, CurrentFnSym,
- (unsigned)ELF::STO_MIPS_MICROMIPS);
+ getTargetStreamer().emitMipsHackSTOCG(CurrentFnSym,
+ (unsigned)ELF::STO_MIPS_MICROMIPS);
OutStreamer.EmitLabel(CurrentFnSym);
}
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
void MipsAsmPrinter::EmitFunctionBodyStart() {
- MCInstLowering.Initialize(Mang, &MF->getContext());
+ MCInstLowering.Initialize(&MF->getContext());
bool IsNakedFunction =
MF->getFunction()->
@@ -284,6 +321,12 @@ void MipsAsmPrinter::EmitFunctionBodyEnd() {
}
OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName()));
}
+ // Make sure to terminate any constant pools that were at the end
+ // of the function.
+ if (!InConstantPool)
+ return;
+ InConstantPool = false;
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
/// isBlockOnlyReachableByFallthough - Return true if the basic block has
@@ -418,6 +461,11 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
return false;
}
}
+ case 'w':
+ // Print MSA registers for the 'f' constraint
+ // In LLVM, the 'w' modifier doesn't need to do anything.
+ // We can just call printOperand as normal.
+ break;
}
}
@@ -485,7 +533,7 @@ void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
return;
case MachineOperand::MO_GlobalAddress:
- O << *Mang->getSymbol(MO.getGlobal());
+ O << *getSymbol(MO.getGlobal());
break;
case MachineOperand::MO_BlockAddress: {
@@ -526,6 +574,15 @@ void MipsAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum,
printOperand(MI, opNum, O);
}
+void MipsAsmPrinter::printUnsignedImm8(const MachineInstr *MI, int opNum,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(opNum);
+ if (MO.isImm())
+ O << (unsigned short int)(unsigned char)MO.getImm();
+ else
+ printOperand(MI, opNum, O);
+}
+
void MipsAsmPrinter::
printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O) {
// Load/Store memory operands -- imm($reg)
@@ -557,6 +614,15 @@ printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// FIXME: Use SwitchSection.
+ // TODO: Need to add -mabicalls and -mno-abicalls flags.
+ // Currently we assume that -mabicalls is the default.
+ if (OutStreamer.hasRawTextSupport()) {
+ OutStreamer.EmitRawText(StringRef("\t.abicalls"));
+ Reloc::Model RM = Subtarget->getRelocationModel();
+ if (RM == Reloc::Static && !Subtarget->hasMips64())
+ OutStreamer.EmitRawText(StringRef("\t.option\tpic0"));
+ }
+
// Tell the assembler which ABI we are using
if (OutStreamer.hasRawTextSupport())
OutStreamer.EmitRawText("\t.section .mdebug." +
@@ -578,25 +644,54 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
}
-void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) {
+static void emitELFHeaderFlagsCG(MipsTargetStreamer &TargetStreamer,
+ const MipsSubtarget &Subtarget) {
+ // Update e_header flags
+ unsigned EFlags = 0;
+
+ // TODO: Need to add -mabicalls and -mno-abicalls flags.
+ // Currently we assume that -mabicalls is the default.
+ EFlags |= ELF::EF_MIPS_CPIC;
+
+ if (Subtarget.inMips16Mode())
+ EFlags |= ELF::EF_MIPS_ARCH_ASE_M16;
+ else
+ EFlags |= ELF::EF_MIPS_NOREORDER;
+
+ // Architecture
+ if (Subtarget.hasMips64r2())
+ EFlags |= ELF::EF_MIPS_ARCH_64R2;
+ else if (Subtarget.hasMips64())
+ EFlags |= ELF::EF_MIPS_ARCH_64;
+ else if (Subtarget.hasMips32r2())
+ EFlags |= ELF::EF_MIPS_ARCH_32R2;
+ else
+ EFlags |= ELF::EF_MIPS_ARCH_32;
+
+ if (Subtarget.inMicroMipsMode())
+ EFlags |= ELF::EF_MIPS_MICROMIPS;
- if (OutStreamer.hasRawTextSupport()) return;
+ // ABI
+ if (Subtarget.isABI_O32())
+ EFlags |= ELF::EF_MIPS_ABI_O32;
+ // Relocation Model
+ Reloc::Model RM = Subtarget.getRelocationModel();
+ if (RM == Reloc::PIC_ || RM == Reloc::Default)
+ EFlags |= ELF::EF_MIPS_PIC;
+ else if (RM == Reloc::Static)
+ ; // Do nothing for Reloc::Static
+ else
+ llvm_unreachable("Unsupported relocation model for e_flags");
+
+ TargetStreamer.emitMipsHackELFFlags(EFlags);
+}
+
+void MipsAsmPrinter::EmitEndOfAsmFile(Module &M) {
// Emit Mips ELF register info
Subtarget->getMReginfo().emitMipsReginfoSectionCG(
OutStreamer, getObjFileLowering(), *Subtarget);
- if (MipsELFStreamer *MES = dyn_cast<MipsELFStreamer>(&OutStreamer))
- MES->emitELFHeaderFlagsCG(*Subtarget);
-}
-
-MachineLocation
-MipsAsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
- // Handles frame addresses emitted in MipsInstrInfo::emitFrameIndexDebugValue.
- assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!");
- assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() &&
- "Unexpected MachineOperand types");
- return MachineLocation(MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
+ emitELFHeaderFlagsCG(getTargetStreamer(), *Subtarget);
}
void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
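The new emitELFHeaderFlagsCG helper above ORs together architecture, ASE, ABI, and relocation-model bits into the ELF e_flags word. The standalone C++ below renders the same selection logic with the standard EF_MIPS_* values spelled out; the numeric constants are taken from the ELF ABI and should be treated as assumptions here rather than part of the patch.

```cpp
// Sketch of the e_flags selection logic; EF_MIPS_* values assumed from the ELF ABI.
#include <cstdint>
#include <cstdio>

enum : uint32_t {
  EF_MIPS_NOREORDER    = 0x00000001,
  EF_MIPS_PIC          = 0x00000002,
  EF_MIPS_CPIC         = 0x00000004,
  EF_MIPS_ABI_O32      = 0x00001000,
  EF_MIPS_MICROMIPS    = 0x02000000,
  EF_MIPS_ARCH_ASE_M16 = 0x04000000,
  EF_MIPS_ARCH_32      = 0x50000000,
  EF_MIPS_ARCH_64      = 0x60000000,
  EF_MIPS_ARCH_32R2    = 0x70000000,
  EF_MIPS_ARCH_64R2    = 0x80000000,
};

struct Features {
  bool Mips16, MicroMips, Mips32r2, Mips64, Mips64r2, ABIO32, PIC;
};

uint32_t computeEFlags(const Features &F) {
  uint32_t Flags = EF_MIPS_CPIC;  // -mabicalls assumed to be the default
  Flags |= F.Mips16 ? EF_MIPS_ARCH_ASE_M16 : EF_MIPS_NOREORDER;
  if (F.Mips64r2)      Flags |= EF_MIPS_ARCH_64R2;  // pick the newest arch bit
  else if (F.Mips64)   Flags |= EF_MIPS_ARCH_64;
  else if (F.Mips32r2) Flags |= EF_MIPS_ARCH_32R2;
  else                 Flags |= EF_MIPS_ARCH_32;
  if (F.MicroMips) Flags |= EF_MIPS_MICROMIPS;
  if (F.ABIO32)    Flags |= EF_MIPS_ABI_O32;
  if (F.PIC)       Flags |= EF_MIPS_PIC;
  return Flags;
}

int main() {
  Features F = {false, false, true, false, false, true, true}; // mips32r2, O32, PIC
  std::printf("e_flags = 0x%08x\n", computeEFlags(F));
}
```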
diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h
index dbdaf266b75f..11c6acd208d1 100644
--- a/lib/Target/Mips/MipsAsmPrinter.h
+++ b/lib/Target/Mips/MipsAsmPrinter.h
@@ -25,10 +25,12 @@ namespace llvm {
class MCStreamer;
class MachineInstr;
class MachineBasicBlock;
+class MipsTargetStreamer;
class Module;
class raw_ostream;
class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {
+ MipsTargetStreamer &getTargetStreamer();
void EmitInstrWithMacroNoAT(const MachineInstr *MI);
@@ -40,6 +42,16 @@ private:
// lowerOperand - Convert a MachineOperand into the equivalent MCOperand.
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp);
+ /// MCP - Keep a pointer to the constant pool entries of the current
+ /// MachineFunction.
+ const MachineConstantPool *MCP;
+
+ /// InConstantPool - Maintain state when emitting a sequence of constant
+ /// pool entries so we can properly mark them as data regions.
+ bool InConstantPool;
+
+ bool UsingConstantPools;
+
public:
const MipsSubtarget *Subtarget;
@@ -47,8 +59,11 @@ public:
MipsMCInstLower MCInstLowering;
explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), MCInstLowering(*this) {
+ : AsmPrinter(TM, Streamer), MCP(0), InConstantPool(false),
+ MCInstLowering(*this) {
Subtarget = &TM.getSubtarget<MipsSubtarget>();
+ UsingConstantPools =
+ (Subtarget->inMips16Mode() && Subtarget->useConstantIslands());
}
virtual const char *getPassName() const {
@@ -57,6 +72,12 @@ public:
virtual bool runOnMachineFunction(MachineFunction &MF);
+ virtual void EmitConstantPool() LLVM_OVERRIDE {
+ if (!UsingConstantPools)
+ AsmPrinter::EmitConstantPool();
+ // We emit constant pools ourselves in that case.
+ }
+
void EmitInstruction(const MachineInstr *MI);
void printSavedRegsBitmask(raw_ostream &O);
void printHex32(unsigned int Value, raw_ostream &O);
@@ -75,13 +96,13 @@ public:
raw_ostream &O);
void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
+ void printUnsignedImm8(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
void printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O);
void printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
const char *Modifier = 0);
void EmitStartOfAsmFile(Module &M);
void EmitEndOfAsmFile(Module &M);
- virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
};
}
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 462def76cc80..66391cb9cb1e 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -26,8 +26,10 @@ def RetCC_MipsO32 : CallingConv<[
// f32 are returned in registers F0, F2
CCIfType<[f32], CCAssignToReg<[F0, F2]>>,
- // f64 are returned in register D0, D1
- CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0, D1]>>>
+ // f64 arguments are returned in D0_64 and D1_64 in FP64bit mode or
+ // in D0 and D1 in FP32bit mode.
+ CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCAssignToReg<[D0_64, D1_64]>>>,
+ CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()", CCAssignToReg<[D0, D1]>>>
]>;
//===----------------------------------------------------------------------===//
@@ -149,7 +151,16 @@ def RetCC_MipsEABI : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsO32_FastCC : CallingConv<[
// f64 arguments are passed in double-precision floating-point registers.
- CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7, D8, D9]>>,
+ CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()",
+ CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7,
+ D8, D9]>>>,
+ CCIfType<[f64], CCIfSubtarget<"isFP64bit()",
+ CCAssignToReg<[D0_64, D1_64, D2_64, D3_64,
+ D4_64, D5_64, D6_64, D7_64,
+ D8_64, D9_64, D10_64, D11_64,
+ D12_64, D13_64, D14_64, D15_64,
+ D16_64, D17_64, D18_64,
+ D19_64]>>>,
// Stack parameter slots for f64 are 64-bit doublewords and 8-byte aligned.
CCIfType<[f64], CCAssignToStack<8, 8>>
@@ -196,6 +207,13 @@ def CC_Mips_FastCC : CallingConv<[
CCDelegateTo<CC_MipsN_FastCC>
]>;
+//==
+
+def CC_Mips16RetHelper : CallingConv<[
+ // Integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>
+]>;
+
//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
@@ -217,9 +235,15 @@ def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP,
def CSR_O32 : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
(sequence "S%u", 7, 0))>;
+def CSR_O32_FP64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 20), RA, FP,
+ (sequence "S%u", 7, 0))>;
+
def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
D23_64, D22_64, D21_64, RA_64, FP_64, GP_64,
(sequence "S%u_64", 7, 0))>;
def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
GP_64, (sequence "S%u_64", 7, 0))>;
+
+def CSR_Mips16RetHelper :
+ CalleeSavedRegs<(add V0, V1, (sequence "A%u", 3, 0), S0, S1)>;
diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp
index 3fc402ba6423..ca4163d4e58c 100644
--- a/lib/Target/Mips/MipsCodeEmitter.cpp
+++ b/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -65,8 +65,7 @@ class MipsCodeEmitter : public MachineFunctionPass {
public:
MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(ID), JTI(0),
- II((const MipsInstrInfo *) tm.getInstrInfo()), TD(tm.getDataLayout()),
+ : MachineFunctionPass(ID), JTI(0), II(0), TD(0),
TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -106,11 +105,16 @@ private:
const MachineOperand &MO) const;
unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getJumpTargetOpValueMM(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getBranchTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const;
unsigned getBranchTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;
unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getMemEncodingMMImm12(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;
+ unsigned getLSAImmEncoding(const MachineInstr &MI, unsigned OpNo) const;
void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc,
int Offset) const;
@@ -187,6 +191,18 @@ unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
return 0;
}
+unsigned MipsCodeEmitter::getJumpTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
+unsigned MipsCodeEmitter::getBranchTargetOpValueMM(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
unsigned OpNo) const {
MachineOperand MO = MI.getOperand(OpNo);
@@ -202,6 +218,12 @@ unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}
+unsigned MipsCodeEmitter::getMemEncodingMMImm12(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
unsigned OpNo) const {
// size is encoded as size-1.
@@ -215,6 +237,12 @@ unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}
+unsigned MipsCodeEmitter::getLSAImmEncoding(const MachineInstr &MI,
+ unsigned OpNo) const {
+ llvm_unreachable("Unimplemented function.");
+ return 0;
+}
+
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
@@ -317,6 +345,14 @@ bool MipsCodeEmitter::expandPseudos(MachineBasicBlock::instr_iterator &MI,
BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::SLL), Mips::ZERO)
.addReg(Mips::ZERO).addImm(0);
break;
+ case Mips::B:
+ BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::BEQ)).addReg(Mips::ZERO)
+ .addReg(Mips::ZERO).addOperand(MI->getOperand(0));
+ break;
+ case Mips::TRAP:
+ BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::BREAK)).addImm(0)
+ .addImm(0);
+ break;
case Mips::JALRPseudo:
BuildMI(MBB, &*MI, MI->getDebugLoc(), II->get(Mips::JALR), Mips::RA)
.addReg(MI->getOperand(0).getReg());
diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td
index 42e4c99f05d6..2de1430a395f 100644
--- a/lib/Target/Mips/MipsCondMov.td
+++ b/lib/Target/Mips/MipsCondMov.td
@@ -16,15 +16,15 @@
// MipsISelLowering::EmitInstrWithCustomInserter if target does not have
// conditional move instructions.
// cond:int, data:int
-class CMov_I_I_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
+class CMov_I_I_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
InstrItinClass Itin> :
InstSE<(outs DRC:$rd), (ins DRC:$rs, CRC:$rt, DRC:$F),
- !strconcat(opstr, "\t$rd, $rs, $rt"), [], Itin, FrmFR> {
+ !strconcat(opstr, "\t$rd, $rs, $rt"), [], Itin, FrmFR, opstr> {
let Constraints = "$F = $rd";
}
// cond:int, data:float
-class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
+class CMov_I_F_FT<string opstr, RegisterOperand CRC, RegisterOperand DRC,
InstrItinClass Itin> :
InstSE<(outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F),
!strconcat(opstr, "\t$fd, $fs, $rt"), [], Itin, FrmFR> {
@@ -32,22 +32,22 @@ class CMov_I_F_FT<string opstr, RegisterClass CRC, RegisterClass DRC,
}
// cond:float, data:int
-class CMov_F_I_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class CMov_F_I_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
- InstSE<(outs RC:$rd), (ins RC:$rs, RC:$F),
- !strconcat(opstr, "\t$rd, $rs, $$fcc0"),
- [(set RC:$rd, (OpNode RC:$rs, RC:$F))], Itin, FrmFR> {
- let Uses = [FCR31];
+ InstSE<(outs RC:$rd), (ins RC:$rs, FCCRegsOpnd:$fcc, RC:$F),
+ !strconcat(opstr, "\t$rd, $rs, $fcc"),
+ [(set RC:$rd, (OpNode RC:$rs, FCCRegsOpnd:$fcc, RC:$F))],
+ Itin, FrmFR, opstr> {
let Constraints = "$F = $rd";
}
// cond:float, data:float
-class CMov_F_F_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class CMov_F_F_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
- InstSE<(outs RC:$fd), (ins RC:$fs, RC:$F),
- !strconcat(opstr, "\t$fd, $fs, $$fcc0"),
- [(set RC:$fd, (OpNode RC:$fs, RC:$F))], Itin, FrmFR> {
- let Uses = [FCR31];
+ InstSE<(outs RC:$fd), (ins RC:$fs, FCCRegsOpnd:$fcc, RC:$F),
+ !strconcat(opstr, "\t$fd, $fs, $fcc"),
+ [(set RC:$fd, (OpNode RC:$fs, FCCRegsOpnd:$fcc, RC:$F))],
+ Itin, FrmFR> {
let Constraints = "$F = $fd";
}
@@ -103,151 +103,143 @@ multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst,
}
// Instantiation of instructions.
-def MOVZ_I_I : CMov_I_I_FT<"movz", CPURegs, CPURegs, NoItinerary>,
+def MOVZ_I_I : MMRel, CMov_I_I_FT<"movz", GPR32Opnd, GPR32Opnd, IIArith>,
ADD_FM<0, 0xa>;
-let Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVZ_I_I64 : CMov_I_I_FT<"movz", CPURegs, CPU64Regs, NoItinerary>,
+
+let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
+ def MOVZ_I_I64 : CMov_I_I_FT<"movz", GPR32Opnd, GPR64Opnd, IIArith>,
+ ADD_FM<0, 0xa>;
+ def MOVZ_I64_I : CMov_I_I_FT<"movz", GPR64Opnd, GPR32Opnd, IIArith>,
+ ADD_FM<0, 0xa>;
+ def MOVZ_I64_I64 : CMov_I_I_FT<"movz", GPR64Opnd, GPR64Opnd, IIArith>,
ADD_FM<0, 0xa>;
- def MOVZ_I64_I : CMov_I_I_FT<"movz", CPU64Regs, CPURegs, NoItinerary>,
- ADD_FM<0, 0xa> {
- let isCodeGenOnly = 1;
- }
- def MOVZ_I64_I64 : CMov_I_I_FT<"movz", CPU64Regs, CPU64Regs, NoItinerary>,
- ADD_FM<0, 0xa> {
- let isCodeGenOnly = 1;
- }
}
-def MOVN_I_I : CMov_I_I_FT<"movn", CPURegs, CPURegs, NoItinerary>,
+def MOVN_I_I : MMRel, CMov_I_I_FT<"movn", GPR32Opnd, GPR32Opnd, IIArith>,
ADD_FM<0, 0xb>;
-let Predicates = [HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVN_I_I64 : CMov_I_I_FT<"movn", CPURegs, CPU64Regs, NoItinerary>,
+
+let Predicates = [HasStdEnc], isCodeGenOnly = 1 in {
+ def MOVN_I_I64 : CMov_I_I_FT<"movn", GPR32Opnd, GPR64Opnd, IIArith>,
+ ADD_FM<0, 0xb>;
+ def MOVN_I64_I : CMov_I_I_FT<"movn", GPR64Opnd, GPR32Opnd, IIArith>,
+ ADD_FM<0, 0xb>;
+ def MOVN_I64_I64 : CMov_I_I_FT<"movn", GPR64Opnd, GPR64Opnd, IIArith>,
ADD_FM<0, 0xb>;
- def MOVN_I64_I : CMov_I_I_FT<"movn", CPU64Regs, CPURegs, NoItinerary>,
- ADD_FM<0, 0xb> {
- let isCodeGenOnly = 1;
- }
- def MOVN_I64_I64 : CMov_I_I_FT<"movn", CPU64Regs, CPU64Regs, NoItinerary>,
- ADD_FM<0, 0xb> {
- let isCodeGenOnly = 1;
- }
}
-def MOVZ_I_S : CMov_I_F_FT<"movz.s", CPURegs, FGR32, IIFmove>,
+def MOVZ_I_S : CMov_I_F_FT<"movz.s", GPR32Opnd, FGR32Opnd, IIFmove>,
CMov_I_F_FM<18, 16>;
-def MOVZ_I64_S : CMov_I_F_FT<"movz.s", CPU64Regs, FGR32, IIFmove>,
- CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
-def MOVN_I_S : CMov_I_F_FT<"movn.s", CPURegs, FGR32, IIFmove>,
+let isCodeGenOnly = 1 in
+def MOVZ_I64_S : CMov_I_F_FT<"movz.s", GPR64Opnd, FGR32Opnd, IIFmove>,
+ CMov_I_F_FM<18, 16>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVN_I_S : CMov_I_F_FT<"movn.s", GPR32Opnd, FGR32Opnd, IIFmove>,
CMov_I_F_FM<19, 16>;
-def MOVN_I64_S : CMov_I_F_FT<"movn.s", CPU64Regs, FGR32, IIFmove>,
- CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+
+let isCodeGenOnly = 1 in
+def MOVN_I64_S : CMov_I_F_FT<"movn.s", GPR64Opnd, FGR32Opnd, IIFmove>,
+ CMov_I_F_FM<19, 16>, Requires<[HasMips64, HasStdEnc]>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", CPURegs, AFGR64, IIFmove>,
+ def MOVZ_I_D32 : CMov_I_F_FT<"movz.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
CMov_I_F_FM<18, 17>;
- def MOVN_I_D32 : CMov_I_F_FT<"movn.d", CPURegs, AFGR64, IIFmove>,
+ def MOVN_I_D32 : CMov_I_F_FT<"movn.d", GPR32Opnd, AFGR64Opnd, IIFmove>,
CMov_I_F_FM<19, 17>;
}
-let Predicates = [IsFP64bit, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", CPURegs, FGR64, IIFmove>,
+
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def MOVZ_I_D64 : CMov_I_F_FT<"movz.d", GPR32Opnd, FGR64Opnd, IIFmove>,
CMov_I_F_FM<18, 17>;
- def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", CPU64Regs, FGR64, IIFmove>,
- CMov_I_F_FM<18, 17> {
- let isCodeGenOnly = 1;
- }
- def MOVN_I_D64 : CMov_I_F_FT<"movn.d", CPURegs, FGR64, IIFmove>,
+ def MOVN_I_D64 : CMov_I_F_FT<"movn.d", GPR32Opnd, FGR64Opnd, IIFmove>,
CMov_I_F_FM<19, 17>;
- def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", CPU64Regs, FGR64, IIFmove>,
- CMov_I_F_FM<19, 17> {
- let isCodeGenOnly = 1;
+ let isCodeGenOnly = 1 in {
+ def MOVZ_I64_D64 : CMov_I_F_FT<"movz.d", GPR64Opnd, FGR64Opnd,
+ IIFmove>, CMov_I_F_FM<18, 17>;
+ def MOVN_I64_D64 : CMov_I_F_FT<"movn.d", GPR64Opnd, FGR64Opnd,
+ IIFmove>, CMov_I_F_FM<19, 17>;
}
}
-def MOVT_I : CMov_F_I_FT<"movt", CPURegs, IIAlu, MipsCMovFP_T>, CMov_F_I_FM<1>;
-def MOVT_I64 : CMov_F_I_FT<"movt", CPU64Regs, IIAlu, MipsCMovFP_T>,
- CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+def MOVT_I : MMRel, CMov_F_I_FT<"movt", GPR32Opnd, IIArith, MipsCMovFP_T>,
+ CMov_F_I_FM<1>;
-def MOVF_I : CMov_F_I_FT<"movf", CPURegs, IIAlu, MipsCMovFP_F>, CMov_F_I_FM<0>;
-def MOVF_I64 : CMov_F_I_FT<"movf", CPU64Regs, IIAlu, MipsCMovFP_F>,
- CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
-}
+let isCodeGenOnly = 1 in
+def MOVT_I64 : CMov_F_I_FT<"movt", GPR64Opnd, IIArith, MipsCMovFP_T>,
+ CMov_F_I_FM<1>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVF_I : MMRel, CMov_F_I_FT<"movf", GPR32Opnd, IIArith, MipsCMovFP_F>,
+ CMov_F_I_FM<0>;
-def MOVT_S : CMov_F_F_FT<"movt.s", FGR32, IIFmove, MipsCMovFP_T>,
+let isCodeGenOnly = 1 in
+def MOVF_I64 : CMov_F_I_FT<"movf", GPR64Opnd, IIArith, MipsCMovFP_F>,
+ CMov_F_I_FM<0>, Requires<[HasMips64, HasStdEnc]>;
+
+def MOVT_S : CMov_F_F_FT<"movt.s", FGR32Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<16, 1>;
-def MOVF_S : CMov_F_F_FT<"movf.s", FGR32, IIFmove, MipsCMovFP_F>,
+def MOVF_S : CMov_F_F_FT<"movf.s", FGR32Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<16, 0>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64, IIFmove, MipsCMovFP_T>,
+ def MOVT_D32 : CMov_F_F_FT<"movt.d", AFGR64Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<17, 1>;
- def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64, IIFmove, MipsCMovFP_F>,
+ def MOVF_D32 : CMov_F_F_FT<"movf.d", AFGR64Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<17, 0>;
}
-let Predicates = [IsFP64bit, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64, IIFmove, MipsCMovFP_T>,
+
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def MOVT_D64 : CMov_F_F_FT<"movt.d", FGR64Opnd, IIFmove, MipsCMovFP_T>,
CMov_F_F_FM<17, 1>;
- def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64, IIFmove, MipsCMovFP_F>,
+ def MOVF_D64 : CMov_F_F_FT<"movf.d", FGR64Opnd, IIFmove, MipsCMovFP_F>,
CMov_F_F_FM<17, 0>;
}
// Instantiation of conditional move patterns.
-defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>;
-defm : MovzPats2<CPURegs, CPURegs, MOVZ_I_I, XORi>;
+defm : MovzPats0<GPR32, GPR32, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<GPR32, GPR32, MOVZ_I_I, XOR>;
+defm : MovzPats2<GPR32, GPR32, MOVZ_I_I, XORi>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR32, GPR64, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<GPR64, GPR32, MOVZ_I_I, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats0<CPU64Regs, CPU64Regs, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR64, GPR64, MOVZ_I_I64, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>;
- defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>;
- defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>;
- defm : MovzPats2<CPURegs, CPU64Regs, MOVZ_I_I64, XORi>;
- defm : MovzPats2<CPU64Regs, CPURegs, MOVZ_I64_I, XORi64>;
- defm : MovzPats2<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XORi64>;
+ defm : MovzPats1<GPR32, GPR64, MOVZ_I_I64, XOR>;
+ defm : MovzPats1<GPR64, GPR32, MOVZ_I64_I, XOR64>;
+ defm : MovzPats1<GPR64, GPR64, MOVZ_I64_I64, XOR64>;
+ defm : MovzPats2<GPR32, GPR64, MOVZ_I_I64, XORi>;
+ defm : MovzPats2<GPR64, GPR32, MOVZ_I64_I, XORi64>;
+ defm : MovzPats2<GPR64, GPR64, MOVZ_I64_I64, XORi64>;
}
-defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>;
+defm : MovnPats<GPR32, GPR32, MOVN_I_I, XOR>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>;
- defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>;
- defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>;
+ defm : MovnPats<GPR32, GPR64, MOVN_I_I64, XOR>;
+ defm : MovnPats<GPR64, GPR32, MOVN_I64_I, XOR64>;
+ defm : MovnPats<GPR64, GPR64, MOVN_I64_I64, XOR64>;
}
-defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
-defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>;
-defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>;
+defm : MovzPats0<GPR32, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>;
+defm : MovzPats1<GPR32, FGR32, MOVZ_I_S, XOR>;
+defm : MovnPats<GPR32, FGR32, MOVN_I_S, XOR>;
let Predicates = [HasMips64, HasStdEnc] in {
- defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR64, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>;
- defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>;
+ defm : MovzPats1<GPR64, FGR32, MOVZ_I64_S, XOR64>;
+ defm : MovnPats<GPR64, FGR32, MOVN_I64_S, XOR64>;
}
let Predicates = [NotFP64bit, HasStdEnc] in {
- defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>;
- defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>;
+ defm : MovzPats0<GPR32, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats1<GPR32, AFGR64, MOVZ_I_D32, XOR>;
+ defm : MovnPats<GPR32, AFGR64, MOVN_I_D32, XOR>;
}
let Predicates = [IsFP64bit, HasStdEnc] in {
- defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
- defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
+ defm : MovzPats0<GPR32, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>;
+ defm : MovzPats0<GPR64, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64,
SLTiu64>;
- defm : MovzPats1<CPURegs, FGR64, MOVZ_I_D64, XOR>;
- defm : MovzPats1<CPU64Regs, FGR64, MOVZ_I64_D64, XOR64>;
- defm : MovnPats<CPURegs, FGR64, MOVN_I_D64, XOR>;
- defm : MovnPats<CPU64Regs, FGR64, MOVN_I64_D64, XOR64>;
+ defm : MovzPats1<GPR32, FGR64, MOVZ_I_D64, XOR>;
+ defm : MovzPats1<GPR64, FGR64, MOVZ_I64_D64, XOR64>;
+ defm : MovnPats<GPR32, FGR64, MOVN_I_D64, XOR>;
+ defm : MovnPats<GPR64, FGR64, MOVN_I64_D64, XOR64>;
}
diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp
index 1951324cf1a1..c46bbacf6585 100644
--- a/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -9,9 +9,7 @@
//
//
// This pass is used to make Pc relative loads of constants.
-// For now, only Mips16 will use this. While it has the same name and
-// uses many ideas from the LLVM ARM Constant Island Pass, it's not intended
-// to reuse any of the code from the ARM version.
+// For now, only Mips16 will use this.
//
// Loading constants inline is expensive on Mips16 and it's in general better
// to place the constant nearby in code space and then it can be loaded with a
@@ -27,32 +25,244 @@
#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
+#include "Mips16InstrInfo.h"
+#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
using namespace llvm;
+STATISTIC(NumCPEs, "Number of constpool entries");
+STATISTIC(NumSplit, "Number of uncond branches inserted");
+STATISTIC(NumCBrFixed, "Number of cond branches fixed");
+STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
+
+// FIXME: This option should be removed once it has received sufficient testing.
+static cl::opt<bool>
+AlignConstantIslands("mips-align-constant-islands", cl::Hidden, cl::init(true),
+ cl::desc("Align constant islands in code"));
+
+
+// Rather than requiring make-check tests with huge amounts of code, we force
+// the small-offset limit to this amount for testing.
+//
+static cl::opt<int> ConstantIslandsSmallOffset(
+ "mips-constant-islands-small-offset",
+ cl::init(0),
+ cl::desc("Make small offsets be this amount for testing purposes"),
+ cl::Hidden);
+
+//
+// For testing purposes we tell it to not use relaxed load forms so that it
+// will split blocks.
+//
+static cl::opt<bool> NoLoadRelaxation(
+ "mips-constant-islands-no-load-relaxation",
+ cl::init(false),
+ cl::desc("Don't relax loads to long loads - for testing purposes"),
+ cl::Hidden);
+
+
namespace {
+
+
typedef MachineBasicBlock::iterator Iter;
typedef MachineBasicBlock::reverse_iterator ReverseIter;
+ /// MipsConstantIslands - Due to limited PC-relative displacements, Mips
+ /// requires constant pool entries to be scattered among the instructions
+ /// inside a function. To do this, it completely ignores the normal LLVM
+ /// constant pool; instead, it places constants wherever it feels like with
+ /// special instructions.
+ ///
+ /// The terminology used in this pass includes:
+ /// Islands - Clumps of constants placed in the function.
+ /// Water - Potential places where an island could be formed.
+ /// CPE - A constant pool entry that has been placed somewhere, which
+ /// tracks a list of users.
+
class MipsConstantIslands : public MachineFunctionPass {
+ /// BasicBlockInfo - Information about the offset and size of a single
+ /// basic block.
+ struct BasicBlockInfo {
+ /// Offset - Distance from the beginning of the function to the beginning
+ /// of this basic block.
+ ///
+ /// Offsets are computed assuming worst case padding before an aligned
+ /// block. This means that subtracting basic block offsets always gives a
+ /// conservative estimate of the real distance which may be smaller.
+ ///
+ /// Because worst case padding is used, the computed offset of an aligned
+ /// block may not actually be aligned.
+ unsigned Offset;
+
+ /// Size - Size of the basic block in bytes. If the block contains
+ /// inline assembly, this is a worst case estimate.
+ ///
+ /// The size does not include any alignment padding whether from the
+ /// beginning of the block, or from an aligned jump table at the end.
+ unsigned Size;
+
+ // FIXME: ignore LogAlign for this patch
+ //
+ unsigned postOffset(unsigned LogAlign = 0) const {
+ unsigned PO = Offset + Size;
+ return PO;
+ }
+
+ BasicBlockInfo() : Offset(0), Size(0) {}
+
+ };
+
+ std::vector<BasicBlockInfo> BBInfo;
+
+ /// WaterList - A sorted list of basic blocks where islands could be placed
+ /// (i.e. blocks that don't fall through to the following block, due
+ /// to a return, unreachable, or unconditional branch).
+ std::vector<MachineBasicBlock*> WaterList;
+
+ /// NewWaterList - The subset of WaterList that was created since the
+ /// previous iteration by inserting unconditional branches.
+ SmallSet<MachineBasicBlock*, 4> NewWaterList;
+
+ typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
+
+ /// CPUser - One user of a constant pool, keeping the machine instruction
+ /// pointer, the constant pool being referenced, and the max displacement
+ /// allowed from the instruction to the CP. The HighWaterMark records the
+ /// highest basic block where a new CPEntry can be placed. To ensure this
+ /// pass terminates, the CP entries are initially placed at the end of the
+ /// function and then move monotonically to lower addresses. The
+ /// exception to this rule is when the current CP entry for a particular
+ /// CPUser is out of range, but there is another CP entry for the same
+ /// constant value in range. We want to use the existing in-range CP
+ /// entry, but if it later moves out of range, the search for new water
+ /// should resume where it left off. The HighWaterMark is used to record
+ /// that point.
+ struct CPUser {
+ MachineInstr *MI;
+ MachineInstr *CPEMI;
+ MachineBasicBlock *HighWaterMark;
+ private:
+ unsigned MaxDisp;
+ unsigned LongFormMaxDisp; // mips16 has 16/32 bit instructions
+ // with different displacements
+ unsigned LongFormOpcode;
+ public:
+ bool NegOk;
+ CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
+ bool neg,
+ unsigned longformmaxdisp, unsigned longformopcode)
+ : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp),
+ LongFormMaxDisp(longformmaxdisp), LongFormOpcode(longformopcode),
+ NegOk(neg){
+ HighWaterMark = CPEMI->getParent();
+ }
+ /// getMaxDisp - Returns the maximum displacement supported by MI.
+ unsigned getMaxDisp() const {
+ unsigned xMaxDisp = ConstantIslandsSmallOffset?
+ ConstantIslandsSmallOffset: MaxDisp;
+ return xMaxDisp;
+ }
+ void setMaxDisp(unsigned val) {
+ MaxDisp = val;
+ }
+ unsigned getLongFormMaxDisp() const {
+ return LongFormMaxDisp;
+ }
+ unsigned getLongFormOpcode() const {
+ return LongFormOpcode;
+ }
+ };
+
+ /// CPUsers - Keep track of all of the machine instructions that use various
+ /// constant pools and their max displacement.
+ std::vector<CPUser> CPUsers;
+
+ /// CPEntry - One per constant pool entry, keeping the machine instruction
+ /// pointer, the constpool index, and the number of CPUser's which
+ /// reference this entry.
+ struct CPEntry {
+ MachineInstr *CPEMI;
+ unsigned CPI;
+ unsigned RefCount;
+ CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
+ : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
+ };
+
+ /// CPEntries - Keep track of all of the constant pool entry machine
+ /// instructions. For each original constpool index (i.e. those that
+ /// existed upon entry to this pass), it keeps a vector of entries.
+ /// Original elements are cloned as we go along; the clones are
+ /// put in the vector of the original element, but have distinct CPIs.
+ std::vector<std::vector<CPEntry> > CPEntries;
+
+ /// ImmBranch - One per immediate branch, keeping the machine instruction
+ /// pointer, conditional or unconditional, the max displacement,
+ /// and (if isCond is true) the corresponding unconditional branch
+ /// opcode.
+ struct ImmBranch {
+ MachineInstr *MI;
+ unsigned MaxDisp : 31;
+ bool isCond : 1;
+ int UncondBr;
+ ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
+ : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
+ };
+
+ /// ImmBranches - Keep track of all the immediate branch instructions.
+ ///
+ std::vector<ImmBranch> ImmBranches;
+
+ /// HasFarJump - True if any far jump instruction has been emitted during
+ /// the branch fix up pass.
+ bool HasFarJump;
+
+ const TargetMachine &TM;
+ bool IsPIC;
+ unsigned ABI;
+ const MipsSubtarget *STI;
+ const Mips16InstrInfo *TII;
+ MipsFunctionInfo *MFI;
+ MachineFunction *MF;
+ MachineConstantPool *MCP;
+
+ unsigned PICLabelUId;
+ bool PrescannedForConstants;
+
+ void initPICLabelUId(unsigned UId) {
+ PICLabelUId = UId;
+ }
+
+
+ unsigned createPICLabelUId() {
+ return PICLabelUId++;
+ }
+
public:
static char ID;
MipsConstantIslands(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
- ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()) {}
+ ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
+ STI(&TM.getSubtarget<MipsSubtarget>()), MF(0), MCP(0),
+ PrescannedForConstants(false){}
virtual const char *getPassName() const {
return "Mips Constant Islands";
@@ -60,30 +270,1264 @@ namespace {
bool runOnMachineFunction(MachineFunction &F);
- private:
+ void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
+ CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
+ unsigned getCPELogAlign(const MachineInstr *CPEMI);
+ void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
+ unsigned getOffsetOf(MachineInstr *MI) const;
+ unsigned getUserOffset(CPUser&) const;
+ void dumpBBs();
+ void verify();
+
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ unsigned Disp, bool NegativeOK);
+ bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U);
+
+ bool isLongFormOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U);
+ void computeBlockSize(MachineBasicBlock *MBB);
+ MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
+ void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
+ void adjustBBOffsetsAfter(MachineBasicBlock *BB);
+ bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
+ int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
+ int findLongFormInRangeCPEntry(CPUser& U, unsigned UserOffset);
+ bool findAvailableWater(CPUser&U, unsigned UserOffset,
+ water_iterator &WaterIter);
+ void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
+ MachineBasicBlock *&NewMBB);
+ bool handleConstantPoolUser(unsigned CPUserIndex);
+ void removeDeadCPEMI(MachineInstr *CPEMI);
+ bool removeUnusedCPEntries();
+ bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned Disp, bool NegOk,
+ bool DoDump = false);
+ bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
+ CPUser &U, unsigned &Growth);
+ bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
+ bool fixupImmediateBr(ImmBranch &Br);
+ bool fixupConditionalBr(ImmBranch &Br);
+ bool fixupUnconditionalBr(ImmBranch &Br);
- const TargetMachine &TM;
- const MipsInstrInfo *TII;
- bool IsPIC;
- unsigned ABI;
+ void prescanForConstants();
+
+ private:
};
char MipsConstantIslands::ID = 0;
} // end of anonymous namespace
+
+bool MipsConstantIslands::isLongFormOffsetInRange
+ (unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset,
+ U.getLongFormMaxDisp(), U.NegOk);
+}
+
+bool MipsConstantIslands::isOffsetInRange
+ (unsigned UserOffset, unsigned TrialOffset,
+ const CPUser &U) {
+ return isOffsetInRange(UserOffset, TrialOffset,
+ U.getMaxDisp(), U.NegOk);
+}
+/// print block size and offset information - debugging
+void MipsConstantIslands::dumpBBs() {
+ DEBUG({
+ for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
+ const BasicBlockInfo &BBI = BBInfo[J];
+ dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
+ << format(" size=%#x\n", BBInfo[J].Size);
+ }
+ });
+}
/// createMipsConstantIslandPass - Returns a pass that places constants in
/// constant islands for Mips16.
FunctionPass *llvm::createMipsConstantIslandPass(MipsTargetMachine &tm) {
return new MipsConstantIslands(tm);
}
-bool MipsConstantIslands::runOnMachineFunction(MachineFunction &F) {
+bool MipsConstantIslands::runOnMachineFunction(MachineFunction &mf) {
// The intention is for this to be a mips16 only pass for now
// FIXME:
- // if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode())
- // return false;
+ MF = &mf;
+ MCP = mf.getConstantPool();
+ DEBUG(dbgs() << "constant island machine function " << "\n");
+ if (!TM.getSubtarget<MipsSubtarget>().inMips16Mode() ||
+ !MipsSubtarget::useConstantIslands()) {
+ return false;
+ }
+ TII = (const Mips16InstrInfo*)MF->getTarget().getInstrInfo();
+ MFI = MF->getInfo<MipsFunctionInfo>();
+ DEBUG(dbgs() << "constant island processing " << "\n");
+ //
+ // We will need to make a predetermination of whether there are any constants
+ // we need to put in constant islands. TBD.
+ //
+ if (!PrescannedForConstants) prescanForConstants();
+
+ HasFarJump = false;
+ // This pass invalidates liveness information when it splits basic blocks.
+ MF->getRegInfo().invalidateLiveness();
+
+ // Renumber all of the machine basic blocks in the function, guaranteeing that
+ // the numbers agree with the position of the block in the function.
+ MF->RenumberBlocks();
+
+ bool MadeChange = false;
+
+ // Perform the initial placement of the constant pool entries. To start with,
+ // we put them all at the end of the function.
+ std::vector<MachineInstr*> CPEMIs;
+ if (!MCP->isEmpty())
+ doInitialPlacement(CPEMIs);
+
+ /// The next UID to take is the first unused one.
+ initPICLabelUId(CPEMIs.size());
+
+ // Do the initial scan of the function, building up information about the
+ // sizes of each block, the location of all the water, and finding all of the
+ // constant pool users.
+ initializeFunctionInfo(CPEMIs);
+ CPEMIs.clear();
+ DEBUG(dumpBBs());
+
+ /// Remove dead constant pool entries.
+ MadeChange |= removeUnusedCPEntries();
+
+ // Iteratively place constant pool entries and fix up branches until there
+ // is no change.
+ unsigned NoCPIters = 0, NoBRIters = 0;
+ (void)NoBRIters;
+ while (true) {
+ DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
+ bool CPChange = false;
+ for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
+ CPChange |= handleConstantPoolUser(i);
+ if (CPChange && ++NoCPIters > 30)
+ report_fatal_error("Constant Island pass failed to converge!");
+ DEBUG(dumpBBs());
+
+ // Clear NewWaterList now. If we split a block for branches, it should
+ // appear as "new water" for the next iteration of constant pool placement.
+ NewWaterList.clear();
+
+ DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
+ bool BRChange = false;
+ for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
+ BRChange |= fixupImmediateBr(ImmBranches[i]);
+ if (BRChange && ++NoBRIters > 30)
+ report_fatal_error("Branch Fix Up pass failed to converge!");
+ DEBUG(dumpBBs());
+ if (!CPChange && !BRChange)
+ break;
+ MadeChange = true;
+ }
+
+ DEBUG(dbgs() << '\n'; dumpBBs());
+
+ BBInfo.clear();
+ WaterList.clear();
+ CPUsers.clear();
+ CPEntries.clear();
+ ImmBranches.clear();
+ return MadeChange;
+}
+
+/// doInitialPlacement - Perform the initial placement of the constant pool
+/// entries. To start with, we put them all at the end of the function.
+void
+MipsConstantIslands::doInitialPlacement(std::vector<MachineInstr*> &CPEMIs) {
+ // Create the basic block to hold the CPE's.
+ MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
+ MF->push_back(BB);
+
+
+ // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
+ unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
+
+ // Mark the basic block as required by the const-pool.
+ // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : 2);
+
+ // The function needs to be as aligned as the basic blocks. The linker may
+ // move functions around based on their alignment.
+ MF->ensureAlignment(BB->getAlignment());
+
+ // Order the entries in BB by descending alignment. That ensures correct
+ // alignment of all entries as long as BB is sufficiently aligned. Keep
+ // track of the insertion point for each alignment. We are going to bucket
+ // sort the entries as they are created.
+ SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
+
+ // Add all of the constants from the constant pool to the end block, use an
+ // identity mapping of CPI's to CPE's.
+ const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
+
+ const DataLayout &TD = *MF->getTarget().getDataLayout();
+ for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
+ unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
+ assert(Size >= 4 && "Too small constant pool entry");
+ unsigned Align = CPs[i].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid alignment");
+ // Verify that all constant pool entries are a multiple of their alignment.
+ // If not, we would have to pad them out so that instructions stay aligned.
+ assert((Size % Align) == 0 && "CP Entry not a multiple of its alignment!");
+
+ // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
+ unsigned LogAlign = Log2_32(Align);
+ MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
+
+ MachineInstr *CPEMI =
+ BuildMI(*BB, InsAt, DebugLoc(), TII->get(Mips::CONSTPOOL_ENTRY))
+ .addImm(i).addConstantPoolIndex(i).addImm(Size);
+
+ CPEMIs.push_back(CPEMI);
+
+ // Ensure that future entries with higher alignment get inserted before
+ // CPEMI. This is bucket sort with iterators.
+ for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
+ if (InsPoint[a] == InsAt)
+ InsPoint[a] = CPEMI;
+ // Add a new CPEntry, but no corresponding CPUser yet.
+ std::vector<CPEntry> CPEs;
+ CPEs.push_back(CPEntry(CPEMI, i));
+ CPEntries.push_back(CPEs);
+ ++NumCPEs;
+ DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
+ << Size << ", align = " << Align <<'\n');
+ }
+ DEBUG(BB->dump());
+}
+
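For illustration only, not from the patch itself: a minimal standalone sketch of
the per-alignment insertion-point idea used above, with made-up entry indices
and log2-alignments. It shows how entries end up ordered by descending
alignment while later entries of equal alignment stay behind earlier ones.

    #include <cstdio>
    #include <list>
    #include <utility>
    #include <vector>

    int main() {
      // Hypothetical constant-pool entries: (index, log2(alignment)),
      // in creation order.
      const std::pair<int, unsigned> CPs[] = {{0, 2}, {1, 3}, {2, 2}, {3, 4}};
      const unsigned MaxAlign = 4;

      std::list<int> Island;  // stands in for the island basic block
      std::vector<std::list<int>::iterator> InsPoint(MaxAlign + 1, Island.end());

      for (const std::pair<int, unsigned> &CP : CPs) {
        unsigned LogAlign = CP.second;
        std::list<int>::iterator InsAt = InsPoint[LogAlign];
        // Insert before entries of the same or smaller alignment.
        std::list<int>::iterator NewIt = Island.insert(InsAt, CP.first);
        // Future entries with higher alignment must go before this one.
        for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
          if (InsPoint[a] == InsAt)
            InsPoint[a] = NewIt;
      }

      for (int Idx : Island)
        std::printf("CPI#%d\n", Idx);  // prints 3, 1, 0, 2: descending alignment
      return 0;
    }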
+/// BBHasFallthrough - Return true if the specified basic block can fallthrough
+/// into the block immediately after it.
+static bool BBHasFallthrough(MachineBasicBlock *MBB) {
+ // Get the next machine basic block in the function.
+ MachineFunction::iterator MBBI = MBB;
+ // Can't fall off end of function.
+ if (llvm::next(MBBI) == MBB->getParent()->end())
+ return false;
+
+ MachineBasicBlock *NextBB = llvm::next(MBBI);
+ for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end(); I != E; ++I)
+ if (*I == NextBB)
+ return true;
+
+ return false;
+}
+
+/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
+/// look up the corresponding CPEntry.
+MipsConstantIslands::CPEntry
+*MipsConstantIslands::findConstPoolEntry(unsigned CPI,
+ const MachineInstr *CPEMI) {
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ // Number of entries per constpool index should be small, just do a
+ // linear search.
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ if (CPEs[i].CPEMI == CPEMI)
+ return &CPEs[i];
+ }
+ return NULL;
+}
+
+/// getCPELogAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI. Alignment is measured in log2(bytes) units.
+unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
+ assert(CPEMI && CPEMI->getOpcode() == Mips::CONSTPOOL_ENTRY);
+
+ // Everything is 4-byte aligned unless AlignConstantIslands is set.
+ if (!AlignConstantIslands)
+ return 2;
+
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
+ unsigned Align = MCP->getConstants()[CPI].getAlignment();
+ assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
+ return Log2_32(Align);
+}
+
+/// initializeFunctionInfo - Do the initial scan of the function, building up
+/// information about the sizes of each block, the location of all the water,
+/// and finding all of the constant pool users.
+void MipsConstantIslands::
+initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
+ BBInfo.clear();
+ BBInfo.resize(MF->getNumBlockIDs());
+
+ // First thing, compute the size of all basic blocks, and see if the function
+ // has any inline assembly in it. If so, we have to be conservative about
+ // alignment assumptions, as we don't know for sure the size of any
+ // instructions in the inline assembly.
+ for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
+ computeBlockSize(I);
+
+
+ // Compute block offsets.
+ adjustBBOffsetsAfter(MF->begin());
+
+ // Now go back through the instructions and build up our data structures.
+ for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
+ MBBI != E; ++MBBI) {
+ MachineBasicBlock &MBB = *MBBI;
+
+ // If this block doesn't fall through into the next MBB, then this is
+ // 'water' where a constant pool island could be placed.
+ if (!BBHasFallthrough(&MBB))
+ WaterList.push_back(&MBB);
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
+
+ int Opc = I->getOpcode();
+ if (I->isBranch()) {
+ bool isCond = false;
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ int UOpc = Opc;
+ switch (Opc) {
+ default:
+ continue; // Ignore other branches for now
+ case Mips::Bimm16:
+ Bits = 11;
+ Scale = 2;
+ isCond = false;
+ break;
+ case Mips::BimmX16:
+ Bits = 16;
+ Scale = 2;
+ isCond = false;
+ }
+ // Record this immediate branch.
+ unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
+ ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
+ }
+
+ if (Opc == Mips::CONSTPOOL_ENTRY)
+ continue;
+
+
+ // Scan the instructions for constant pool operands.
+ for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
+ if (I->getOperand(op).isCPI()) {
+
+ // We found one. The addressing mode tells us the max displacement
+ // from the PC that this instruction permits.
+
+ // Basic size info comes from the TSFlags field.
+ unsigned Bits = 0;
+ unsigned Scale = 1;
+ bool NegOk = false;
+ unsigned LongFormBits = 0;
+ unsigned LongFormScale = 0;
+ unsigned LongFormOpcode = 0;
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unknown addressing mode for CP reference!");
+ case Mips::LwRxPcTcp16:
+ Bits = 8;
+ Scale = 4;
+ LongFormOpcode = Mips::LwRxPcTcpX16;
+ LongFormBits = 16;
+ LongFormScale = 1;
+ break;
+ case Mips::LwRxPcTcpX16:
+ Bits = 16;
+ Scale = 1;
+ NegOk = true;
+ break;
+ }
+ // Remember that this is a user of a CP entry.
+ unsigned CPI = I->getOperand(op).getIndex();
+ MachineInstr *CPEMI = CPEMIs[CPI];
+ unsigned MaxOffs = ((1 << Bits)-1) * Scale;
+ unsigned LongFormMaxOffs = ((1 << LongFormBits)-1) * LongFormScale;
+ CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk,
+ LongFormMaxOffs, LongFormOpcode));
+
+ // Increment corresponding CPEntry reference count.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Cannot find a corresponding CPEntry!");
+ CPE->RefCount++;
+
+ // Instructions can only use one CP entry, don't bother scanning the
+ // rest of the operands.
+ break;
+
+ }
+
+ }
+ }
+
+}
+
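As a worked reading of the Bits/Scale cases above (illustrative arithmetic
only): the 11-bit Bimm16 branch gets MaxOffs = ((1 << 10) - 1) * 2 = 2046
bytes and BimmX16 gets ((1 << 15) - 1) * 2 = 65534 bytes, while the short
LwRxPcTcp16 load reaches ((1 << 8) - 1) * 4 = 1020 bytes and its long form
LwRxPcTcpX16 reaches ((1 << 16) - 1) * 1 = 65535 bytes, with NegOk allowing
the constant to sit before the load as well.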
+/// computeBlockSize - Compute the size and some alignment information for MBB.
+/// This function updates BBInfo directly.
+void MipsConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
+ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
+ BBI.Size = 0;
+
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I)
+ BBI.Size += TII->GetInstSizeInBytes(I);
+
+}
+
+/// getOffsetOf - Return the current offset of the specified machine instruction
+/// from the start of the function. This offset changes as stuff is moved
+/// around inside the function.
+unsigned MipsConstantIslands::getOffsetOf(MachineInstr *MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // The offset is composed of two things: the sum of the sizes of all MBB's
+ // before this instruction's block, and the offset from the start of the block
+ // it is in.
+ unsigned Offset = BBInfo[MBB->getNumber()].Offset;
+
+ // Sum instructions before MI in MBB.
+ for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
+ assert(I != MBB->end() && "Didn't find MI in its own basic block?");
+ Offset += TII->GetInstSizeInBytes(I);
+ }
+ return Offset;
+}
+
+/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
+/// ID.
+static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
+ const MachineBasicBlock *RHS) {
+ return LHS->getNumber() < RHS->getNumber();
+}
+
+/// updateForInsertedWaterBlock - When a block is newly inserted into the
+/// machine function, it upsets all of the block numbers. Renumber the blocks
+/// and update the arrays that parallel this numbering.
+void MipsConstantIslands::updateForInsertedWaterBlock
+ (MachineBasicBlock *NewBB) {
+ // Renumber the MBB's to keep them consecutive.
+ NewBB->getParent()->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+ // Next, update WaterList. Specifically, we need to add NewMBB as having
+ // available water after it.
+ water_iterator IP =
+ std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
+ CompareMBBNumbers);
+ WaterList.insert(IP, NewBB);
+}
+
+unsigned MipsConstantIslands::getUserOffset(CPUser &U) const {
+ return getOffsetOf(U.MI);
+}
+
+/// Split the basic block containing MI into two blocks, which are joined by
+/// an unconditional branch. Update data structures and renumber blocks to
+/// account for this change and returns the newly created block.
+MachineBasicBlock *MipsConstantIslands::splitBlockBeforeInstr
+ (MachineInstr *MI) {
+ MachineBasicBlock *OrigBB = MI->getParent();
+
+ // Create a new MBB for the code after the OrigBB.
+ MachineBasicBlock *NewBB =
+ MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
+ MachineFunction::iterator MBBI = OrigBB; ++MBBI;
+ MF->insert(MBBI, NewBB);
+
+ // Splice the instructions starting with MI over to NewBB.
+ NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
+
+ // Add an unconditional branch from OrigBB to NewBB.
+ // Note the new unconditional branch is not being recorded.
+ // There doesn't seem to be meaningful DebugInfo available; this doesn't
+ // correspond to anything in the source.
+ BuildMI(OrigBB, DebugLoc(), TII->get(Mips::Bimm16)).addMBB(NewBB);
+ ++NumSplit;
+
+ // Update the CFG. All succs of OrigBB are now succs of NewBB.
+ NewBB->transferSuccessors(OrigBB);
+
+ // OrigBB branches to NewBB.
+ OrigBB->addSuccessor(NewBB);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ // This is almost the same as updateForInsertedWaterBlock, except that
+ // the Water goes after OrigBB, not NewBB.
+ MF->RenumberBlocks(NewBB);
+
+ // Insert an entry into BBInfo to align it properly with the (newly
+ // renumbered) block numbers.
+ BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
+
+ // Next, update WaterList. Specifically, we need to add OrigMBB as having
+ // available water after it (but not if it's already there, which happens
+ // when splitting before a conditional branch that is followed by an
+ // unconditional branch - in that case we want to insert NewBB).
+ water_iterator IP =
+ std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
+ CompareMBBNumbers);
+ MachineBasicBlock* WaterBB = *IP;
+ if (WaterBB == OrigBB)
+ WaterList.insert(llvm::next(IP), NewBB);
+ else
+ WaterList.insert(IP, OrigBB);
+ NewWaterList.insert(OrigBB);
+
+ // Figure out how large the OrigBB is. As the first half of the original
+ // block, it cannot contain a tablejump. The size includes
+ // the new jump we added. (It should be possible to do this without
+ // recounting everything, but it's very confusing, and this is rarely
+ // executed.)
+ computeBlockSize(OrigBB);
+
+ // Figure out how large the NewMBB is. As the second half of the original
+ // block, it may contain a tablejump.
+ computeBlockSize(NewBB);
+
+ // All BBOffsets following these blocks must be modified.
+ adjustBBOffsetsAfter(OrigBB);
+
+ return NewBB;
+}
+
+
+
+/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
+/// reference) is within MaxDisp of TrialOffset (a proposed location of a
+/// constant pool entry).
+bool MipsConstantIslands::isOffsetInRange(unsigned UserOffset,
+ unsigned TrialOffset, unsigned MaxDisp,
+ bool NegativeOK) {
+ if (UserOffset <= TrialOffset) {
+ // User before the Trial.
+ if (TrialOffset - UserOffset <= MaxDisp)
+ return true;
+ } else if (NegativeOK) {
+ if (UserOffset - TrialOffset <= MaxDisp)
+ return true;
+ }
+ return false;
+}
+
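A standalone sketch of the displacement test above, exercised with made-up
offsets; it is an illustration, not code from the patch. The 1020-byte limit
corresponds to the short-form load (8 bits scaled by 4) computed in
initializeFunctionInfo.

    #include <cstdio>

    // Same test as MipsConstantIslands::isOffsetInRange above.
    static bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                                unsigned MaxDisp, bool NegativeOK) {
      if (UserOffset <= TrialOffset)
        return TrialOffset - UserOffset <= MaxDisp;             // CPE after the user
      return NegativeOK && UserOffset - TrialOffset <= MaxDisp; // CPE before the user
    }

    int main() {
      std::printf("%d\n", isOffsetInRange(0x100, 0x4f0, 1020, false)); // 1: 1008 <= 1020
      std::printf("%d\n", isOffsetInRange(0x100, 0x510, 1020, false)); // 0: 1040 >  1020
      std::printf("%d\n", isOffsetInRange(0x500, 0x100, 65535, true)); // 1: NegOk, backward
      return 0;
    }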
+/// isWaterInRange - Returns true if a CPE placed after the specified
+/// Water (a basic block) will be in range for the specific MI.
+///
+/// Compute how much the function will grow by inserting a CPE after Water.
+bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
+ MachineBasicBlock* Water, CPUser &U,
+ unsigned &Growth) {
+ unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
+ unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
+ unsigned NextBlockOffset, NextBlockAlignment;
+ MachineFunction::const_iterator NextBlock = Water;
+ if (++NextBlock == MF->end()) {
+ NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
+ NextBlockAlignment = 0;
+ } else {
+ NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
+ NextBlockAlignment = NextBlock->getAlignment();
+ }
+ unsigned Size = U.CPEMI->getOperand(2).getImm();
+ unsigned CPEEnd = CPEOffset + Size;
+
+ // The CPE may be able to hide in the alignment padding before the next
+ // block. It may also cause more padding to be required if it is more aligned
+ // than the next block.
+ if (CPEEnd > NextBlockOffset) {
+ Growth = CPEEnd - NextBlockOffset;
+ // Compute the padding that would go at the end of the CPE to align the next
+ // block.
+ Growth += OffsetToAlignment(CPEEnd, 1u << NextBlockAlignment);
+
+ // If the CPE is to be inserted before the instruction, that will raise
+ // the offset of the instruction. Also account for unknown alignment padding
+ // in blocks between CPE and the user.
+ if (CPEOffset < UserOffset)
+ UserOffset += Growth;
+ } else
+ // CPE fits in existing padding.
+ Growth = 0;
+
+ return isOffsetInRange(UserOffset, CPEOffset, U);
+}
+
+/// isCPEntryInRange - Returns true if the distance between specific MI and
+/// specific ConstPool entry instruction can fit in MI's displacement field.
+bool MipsConstantIslands::isCPEntryInRange
+ (MachineInstr *MI, unsigned UserOffset,
+ MachineInstr *CPEMI, unsigned MaxDisp,
+ bool NegOk, bool DoDump) {
+ unsigned CPEOffset = getOffsetOf(CPEMI);
+
+ if (DoDump) {
+ DEBUG({
+ unsigned Block = MI->getParent()->getNumber();
+ const BasicBlockInfo &BBI = BBInfo[Block];
+ dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
+ << " max delta=" << MaxDisp
+ << format(" insn address=%#x", UserOffset)
+ << " in BB#" << Block << ": "
+ << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
+ << format("CPE address=%#x offset=%+d: ", CPEOffset,
+ int(CPEOffset-UserOffset));
+ });
+ }
+
+ return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
+}
+
+#ifndef NDEBUG
+/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
+/// unconditionally branches to its only successor.
+static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
+ if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
+ return false;
+ MachineBasicBlock *Succ = *MBB->succ_begin();
+ MachineBasicBlock *Pred = *MBB->pred_begin();
+ MachineInstr *PredMI = &Pred->back();
+ if (PredMI->getOpcode() == Mips::Bimm16)
+ return PredMI->getOperand(0).getMBB() == Succ;
+ return false;
+}
+#endif
+
+void MipsConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
+ unsigned BBNum = BB->getNumber();
+ for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
+ // Get the offset at the end of the layout predecessor.
+ // (Alignment padding is ignored for now; see the LogAlign FIXME above.)
+ unsigned Offset = BBInfo[i - 1].Offset + BBInfo[i - 1].Size;
+ BBInfo[i].Offset = Offset;
+ }
+}
+
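An illustrative example of the offset propagation above (made-up block sizes):
with three blocks of sizes 12, 8 and 20 bytes the offsets are 0, 12 and 20;
if block 1 grows by 2 bytes, say because a branch was rewritten to its long
form, adjustBBOffsetsAfter recomputes the later offsets as 0, 12 and 22.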
+/// decrementCPEReferenceCount - find the constant pool entry with index CPI
+/// and instruction CPEMI, and decrement its refcount. If the refcount
+/// becomes 0 remove the entry and instruction. Returns true if we removed
+/// the entry, false if we didn't.
+
+bool MipsConstantIslands::decrementCPEReferenceCount(unsigned CPI,
+ MachineInstr *CPEMI) {
+ // Find the old entry. Eliminate it if it is no longer used.
+ CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
+ assert(CPE && "Unexpected!");
+ if (--CPE->RefCount == 0) {
+ removeDeadCPEMI(CPEMI);
+ CPE->CPEMI = NULL;
+ --NumCPEs;
+ return true;
+ }
+ return false;
+}
+
+/// findInRangeCPEntry - see if the currently referenced CPE is in range;
+/// if not, see if an in-range clone of the CPE is in range, and if so,
+/// change the data structures so the user references the clone. Returns:
+/// 0 = no existing entry found
+/// 1 = entry found, and there were no code insertions or deletions
+/// 2 = entry found, and there were code insertions or deletions
+int MipsConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
+{
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+
+ // Check to see if the CPE is already in-range.
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
+ true)) {
+ DEBUG(dbgs() << "In range\n");
+ return 1;
+ }
+
+ // No. Look for previously created clones of the CPE that are in range.
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ // We already tried this one
+ if (CPEs[i].CPEMI == CPEMI)
+ continue;
+ // Removing CPEs can leave empty entries, skip
+ if (CPEs[i].CPEMI == NULL)
+ continue;
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
+ U.NegOk)) {
+ DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
+ // Point the CPUser node to the replacement
+ U.CPEMI = CPEs[i].CPEMI;
+ // Change the CPI in the instruction operand to refer to the clone.
+ for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
+ if (UserMI->getOperand(j).isCPI()) {
+ UserMI->getOperand(j).setIndex(CPEs[i].CPI);
+ break;
+ }
+ // Adjust the refcount of the clone...
+ CPEs[i].RefCount++;
+ // ...and the original. If we didn't remove the old entry, none of the
+ // addresses changed, so we don't need another pass.
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
+ }
+ }
+ return 0;
+}
+
+/// findLongFormInRangeCPEntry - see if the currently referenced CPE is in
+/// range; this version checks whether the longer form of the instruction can
+/// be used to satisfy things.
+/// if not, see if an in-range clone of the CPE is in range, and if so,
+/// change the data structures so the user references the clone. Returns:
+/// 0 = no existing entry found
+/// 1 = entry found, and there were no code insertions or deletions
+/// 2 = entry found, and there were code insertions or deletions
+int MipsConstantIslands::findLongFormInRangeCPEntry
+ (CPUser& U, unsigned UserOffset)
+{
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+
+ // Check to see if the CPE is already in-range.
+ if (isCPEntryInRange(UserMI, UserOffset, CPEMI,
+ U.getLongFormMaxDisp(), U.NegOk,
+ true)) {
+ DEBUG(dbgs() << "In range\n");
+ UserMI->setDesc(TII->get(U.getLongFormOpcode()));
+ U.setMaxDisp(U.getLongFormMaxDisp());
+ return 2; // instruction is longer length now
+ }
+
+ // No. Look for previously created clones of the CPE that are in range.
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ std::vector<CPEntry> &CPEs = CPEntries[CPI];
+ for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
+ // We already tried this one
+ if (CPEs[i].CPEMI == CPEMI)
+ continue;
+ // Removing CPEs can leave empty entries, skip
+ if (CPEs[i].CPEMI == NULL)
+ continue;
+ if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI,
+ U.getLongFormMaxDisp(), U.NegOk)) {
+ DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
+ << CPEs[i].CPI << "\n");
+ // Point the CPUser node to the replacement
+ U.CPEMI = CPEs[i].CPEMI;
+ // Change the CPI in the instruction operand to refer to the clone.
+ for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
+ if (UserMI->getOperand(j).isCPI()) {
+ UserMI->getOperand(j).setIndex(CPEs[i].CPI);
+ break;
+ }
+ // Adjust the refcount of the clone...
+ CPEs[i].RefCount++;
+ // ...and the original. If we didn't remove the old entry, none of the
+ // addresses changed, so we don't need another pass.
+ return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
+ }
+ }
+ return 0;
+}
+
+/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
+/// the specified unconditional branch instruction.
+static inline unsigned getUnconditionalBrDisp(int Opc) {
+ switch (Opc) {
+ case Mips::Bimm16:
+ return ((1<<10)-1)*2;
+ case Mips::BimmX16:
+ return ((1<<16)-1)*2;
+ default:
+ break;
+ }
+ return ((1<<16)-1)*2;
+}
+
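Illustrative arithmetic for the helper above: a Bimm16 can be redirected to a
target up to ((1 << 10) - 1) * 2 = 2046 bytes away, while BimmX16 (and the
default case) allows ((1 << 16) - 1) * 2 = 131070 bytes.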
+/// findAvailableWater - Look for an existing entry in the WaterList in which
+/// we can place the CPE referenced from U so it's within range of U's MI.
+/// Returns true if found, false if not. If it returns true, WaterIter
+/// is set to the WaterList entry.
+/// To ensure that this pass
+/// terminates, the CPE location for a particular CPUser is only allowed to
+/// move to a lower address, so search backward from the end of the list and
+/// prefer the first water that is in range.
+bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
+ water_iterator &WaterIter) {
+ if (WaterList.empty())
+ return false;
+
+ unsigned BestGrowth = ~0u;
+ for (water_iterator IP = prior(WaterList.end()), B = WaterList.begin();;
+ --IP) {
+ MachineBasicBlock* WaterBB = *IP;
+ // Check if water is in range and is either at a lower address than the
+ // current "high water mark" or a new water block that was created since
+ // the previous iteration by inserting an unconditional branch. In the
+ // latter case, we want to allow resetting the high water mark back to
+ // this new water since we haven't seen it before. Inserting branches
+ // should be relatively uncommon and when it does happen, we want to be
+ // sure to take advantage of it for all the CPEs near that block, so that
+ // we don't insert more branches than necessary.
+ unsigned Growth;
+ if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
+ (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
+ NewWaterList.count(WaterBB)) && Growth < BestGrowth) {
+ // This is the least amount of required padding seen so far.
+ BestGrowth = Growth;
+ WaterIter = IP;
+ DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
+ << " Growth=" << Growth << '\n');
+
+ // Keep looking unless it is perfect.
+ if (BestGrowth == 0)
+ return true;
+ }
+ if (IP == B)
+ break;
+ }
+ return BestGrowth != ~0u;
+}
+
+/// createNewWater - No existing WaterList entry will work for
+/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
+/// block is used if in range, and the conditional branch munged so control
+/// flow is correct. Otherwise the block is split to create a hole with an
+/// unconditional branch around it. In either case NewMBB is set to a
+/// block following which the new island can be inserted (the WaterList
+/// is not adjusted).
+void MipsConstantIslands::createNewWater(unsigned CPUserIndex,
+ unsigned UserOffset,
+ MachineBasicBlock *&NewMBB) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPELogAlign = getCPELogAlign(CPEMI);
+ MachineBasicBlock *UserMBB = UserMI->getParent();
+ const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
+
+ // If the block does not end in an unconditional branch already, and if the
+ // end of the block is within range, make new water there.
+ if (BBHasFallthrough(UserMBB)) {
+ // Size of branch to insert.
+ unsigned Delta = 2;
+ // Compute the offset where the CPE will begin.
+ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
+
+ if (isOffsetInRange(UserOffset, CPEOffset, U)) {
+ DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
+ << format(", expected CPE offset %#x\n", CPEOffset));
+ NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
+ // Add an unconditional branch from UserMBB to fallthrough block. Record
+ // it for branch lengthening; this new branch will not get out of range,
+ // but if the preceding conditional branch is out of range, the targets
+ // will be exchanged, and the altered branch may be out of range, so the
+ // machinery has to know about it.
+ int UncondBr = Mips::Bimm16;
+ BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
+ unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
+ ImmBranches.push_back(ImmBranch(&UserMBB->back(),
+ MaxDisp, false, UncondBr));
+ BBInfo[UserMBB->getNumber()].Size += Delta;
+ adjustBBOffsetsAfter(UserMBB);
+ return;
+ }
+ }
+
+ // What a big block. Find a place within the block to split it.
+
+ // Try to split the block so it's fully aligned. Compute the latest split
+ // point where we can add a 4-byte branch instruction, and then align to
+ // LogAlign which is the largest possible alignment in the function.
+ unsigned LogAlign = MF->getAlignment();
+ assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ DEBUG(dbgs() << format("Split in middle of big block before %#x",
+ BaseInsertOffset));
+
+ // The 4 in the following is for the unconditional branch we'll be inserting.
+ // Alignment of the island is handled inside isOffsetInRange.
+ BaseInsertOffset -= 4;
+
+ DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
+ << " la=" << LogAlign << '\n');
+
+ // This could point off the end of the block if we've already got constant
+ // pool entries following this block; only the last one is in the water list.
+ // Back past any possible branches (allow for a conditional and a maximally
+ // long unconditional).
+ if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
+ BaseInsertOffset = UserBBI.postOffset() - 8;
+ DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
+ }
+ unsigned EndInsertOffset = BaseInsertOffset + 4 +
+ CPEMI->getOperand(2).getImm();
+ MachineBasicBlock::iterator MI = UserMI;
+ ++MI;
+ unsigned CPUIndex = CPUserIndex+1;
+ unsigned NumCPUsers = CPUsers.size();
+ //MachineInstr *LastIT = 0;
+ for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
+ Offset < BaseInsertOffset;
+ Offset += TII->GetInstSizeInBytes(MI),
+ MI = llvm::next(MI)) {
+ assert(MI != UserMBB->end() && "Fell off end of block");
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ CPUser &U = CPUsers[CPUIndex];
+ if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
+ // Shift the insertion point by one unit of alignment so it is within reach.
+ BaseInsertOffset -= 1u << LogAlign;
+ EndInsertOffset -= 1u << LogAlign;
+ }
+ // This is overly conservative, as we don't account for CPEMIs being
+ // reused within the block, but it doesn't matter much. Also assume CPEs
+ // are added in order with alignment padding. We may eventually be able
+ // to pack the aligned CPEs better.
+ EndInsertOffset += U.CPEMI->getOperand(2).getImm();
+ CPUIndex++;
+ }
+ }
+
+ --MI;
+ NewMBB = splitBlockBeforeInstr(MI);
+}
+
+/// handleConstantPoolUser - Analyze the specified user, checking to see if it
+/// is out-of-range. If so, pick up the constant pool value and move it some
+/// place in-range. Return true if we changed any addresses (thus must run
+/// another pass of branch lengthening), false otherwise.
+bool MipsConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) {
+ CPUser &U = CPUsers[CPUserIndex];
+ MachineInstr *UserMI = U.MI;
+ MachineInstr *CPEMI = U.CPEMI;
+ unsigned CPI = CPEMI->getOperand(1).getIndex();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ // Compute this only once, it's expensive.
+ unsigned UserOffset = getUserOffset(U);
+
+ // See if the current entry is within range, or there is a clone of it
+ // in range.
+ int result = findInRangeCPEntry(U, UserOffset);
+ if (result==1) return false;
+ else if (result==2) return true;
+
+
+ // Look for water where we can place this CPE.
+ MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
+ MachineBasicBlock *NewMBB;
+ water_iterator IP;
+ if (findAvailableWater(U, UserOffset, IP)) {
+ DEBUG(dbgs() << "Found water in range\n");
+ MachineBasicBlock *WaterBB = *IP;
+
+ // If the original WaterList entry was "new water" on this iteration,
+ // propagate that to the new island. This is just keeping NewWaterList
+ // updated to match the WaterList, which will be updated below.
+ if (NewWaterList.erase(WaterBB))
+ NewWaterList.insert(NewIsland);
+
+ // The new CPE goes before the following block (NewMBB).
+ NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
+
+ } else {
+ // No water found.
+ // We first see if a longer form of the instruction could have reached
+ // the constant; in that case we won't bother to split.
+ if (!NoLoadRelaxation) {
+ result = findLongFormInRangeCPEntry(U, UserOffset);
+ if (result != 0) return true;
+ }
+ DEBUG(dbgs() << "No water found\n");
+ createNewWater(CPUserIndex, UserOffset, NewMBB);
+
+ // splitBlockBeforeInstr adds to WaterList, which is important when it is
+ // called while handling branches so that the water will be seen on the
+ // next iteration for constant pools, but in this context, we don't want
+ // it. Check for this so it will be removed from the WaterList.
+ // Also remove any entry from NewWaterList.
+ MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
+ IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
+ if (IP != WaterList.end())
+ NewWaterList.erase(WaterBB);
+
+ // We are adding new water. Update NewWaterList.
+ NewWaterList.insert(NewIsland);
+ }
+
+ // Remove the original WaterList entry; we want subsequent insertions in
+ // this vicinity to go after the one we're about to insert. This
+ // considerably reduces the number of times we have to move the same CPE
+ // more than once and is also important to ensure the algorithm terminates.
+ if (IP != WaterList.end())
+ WaterList.erase(IP);
+
+ // Okay, we know we can put an island before NewMBB now, do it!
+ MF->insert(NewMBB, NewIsland);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ updateForInsertedWaterBlock(NewIsland);
+
+ // Decrement the old entry, and remove it if refcount becomes 0.
+ decrementCPEReferenceCount(CPI, CPEMI);
+
+ // No existing clone of this CPE is within range.
+ // We will be generating a new clone. Get a UID for it.
+ unsigned ID = createPICLabelUId();
+
+ // Now that we have an island to add the CPE to, clone the original CPE and
+ // add it to the island.
+ U.HighWaterMark = NewIsland;
+ U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(Mips::CONSTPOOL_ENTRY))
+ .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
+ CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
+ ++NumCPEs;
+
+ // Mark the basic block as aligned as required by the const-pool entry.
+ NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
+
+ // Increase the size of the island block to account for the new entry.
+ BBInfo[NewIsland->getNumber()].Size += Size;
+ adjustBBOffsetsAfter(llvm::prior(MachineFunction::iterator(NewIsland)));
+
+ // Finally, change the CPI in the instruction operand to be ID.
+ for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
+ if (UserMI->getOperand(i).isCPI()) {
+ UserMI->getOperand(i).setIndex(ID);
+ break;
+ }
+
+ DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
+ << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset));
+
+ return true;
+}
+
+/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
+/// sizes and offsets of impacted basic blocks.
+void MipsConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
+ MachineBasicBlock *CPEBB = CPEMI->getParent();
+ unsigned Size = CPEMI->getOperand(2).getImm();
+ CPEMI->eraseFromParent();
+ BBInfo[CPEBB->getNumber()].Size -= Size;
+ // All succeeding offsets have the current size value added in, fix this.
+ if (CPEBB->empty()) {
+ BBInfo[CPEBB->getNumber()].Size = 0;
+
+ // This block no longer needs to be aligned.
+ CPEBB->setAlignment(0);
+ } else
+ // Entries are sorted by descending alignment, so realign from the front.
+ CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+
+ adjustBBOffsetsAfter(CPEBB);
+ // An island has only one predecessor BB and one successor BB. Check if
+ // this BB's predecessor jumps directly to this BB's successor. This
+ // shouldn't happen currently.
+ assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
+ // FIXME: remove the empty blocks after all the work is done?
+}
+
+/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
+/// are zero.
+bool MipsConstantIslands::removeUnusedCPEntries() {
+ bool MadeChange = false;
+ for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
+ std::vector<CPEntry> &CPEs = CPEntries[i];
+ for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
+ if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
+ removeDeadCPEMI(CPEs[j].CPEMI);
+ CPEs[j].CPEMI = NULL;
+ MadeChange = true;
+ }
+ }
+ }
+ return MadeChange;
+}
+
+/// isBBInRange - Returns true if the distance between specific MI and
+/// specific BB can fit in MI's displacement field.
+bool MipsConstantIslands::isBBInRange
+ (MachineInstr *MI, MachineBasicBlock *DestBB, unsigned MaxDisp) {
+ unsigned PCAdj = 4;
+
+ unsigned BrOffset = getOffsetOf(MI) + PCAdj;
+ unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
+
+ DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
+ << " from BB#" << MI->getParent()->getNumber()
+ << " max delta=" << MaxDisp
+ << " from " << getOffsetOf(MI) << " to " << DestOffset
+ << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
+
+ if (BrOffset <= DestOffset) {
+ // Branch before the Dest.
+ if (DestOffset-BrOffset <= MaxDisp)
+ return true;
+ } else {
+ if (BrOffset-DestOffset <= MaxDisp)
+ return true;
+ }
return false;
}
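An illustrative reading of the check above (made-up offsets): a Bimm16 at
offset 0x100 is treated as sitting at BrOffset = 0x100 + 4 = 0x104, so with
MaxDisp = 2046 it reaches forward destinations up to offset 0x104 + 2046 =
0x902; branches that fail this check are rewritten by the fixup routines below.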
+/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
+/// away to fit in its displacement field.
+bool MipsConstantIslands::fixupImmediateBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+
+ // Check to see if the DestBB is already in-range.
+ if (isBBInRange(MI, DestBB, Br.MaxDisp))
+ return false;
+
+ if (!Br.isCond)
+ return fixupUnconditionalBr(Br);
+ return fixupConditionalBr(Br);
+}
+
+/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
+/// too far away to fit in its displacement field. The branch is rewritten to
+/// its long form (BimmX16), which has a larger displacement range.
+bool
+MipsConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *MBB = MI->getParent();
+ // Use the long-form branch (BimmX16) to implement the far jump.
+ Br.MaxDisp = ((1 << 16)-1) * 2;
+ MI->setDesc(TII->get(Mips::BimmX16));
+ BBInfo[MBB->getNumber()].Size += 2;
+ adjustBBOffsetsAfter(MBB);
+ HasFarJump = true;
+ ++NumUBrFixed;
+
+ DEBUG(dbgs() << " Changed B to long jump " << *MI);
+
+ return true;
+}
+
+/// fixupConditionalBr - Fix up a conditional branch whose destination is too
+/// far away to fit in its displacement field. It is converted to an inverse
+/// conditional branch + an unconditional branch to the destination.
+bool
+MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) {
+ MachineInstr *MI = Br.MI;
+ MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
+
+ // Add an unconditional branch to the destination and invert the branch
+ // condition to jump over it:
+ // blt L1
+ // =>
+ // bge L2
+ // b L1
+ // L2:
+ unsigned CCReg = 0; // FIXME
+ unsigned CC=0; //FIXME
+
+ // If the branch is at the end of its MBB and that has a fall-through block,
+ // direct the updated conditional branch to the fall-through block. Otherwise,
+ // split the MBB before the next instruction.
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineInstr *BMI = &MBB->back();
+ bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
+
+ ++NumCBrFixed;
+ if (BMI != MI) {
+ if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
+ BMI->getOpcode() == Br.UncondBr) {
+ // Last MI in the BB is an unconditional branch. Can we simply invert the
+ // condition and swap destinations:
+ // beq L1
+ // b L2
+ // =>
+ // bne L2
+ // b L1
+ MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
+ if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
+ DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
+ << *BMI);
+ BMI->getOperand(0).setMBB(DestBB);
+ MI->getOperand(0).setMBB(NewDest);
+ return true;
+ }
+ }
+ }
+
+ if (NeedSplit) {
+ splitBlockBeforeInstr(MI);
+ // No need for the branch to the next block. We're adding an unconditional
+ // branch to the destination.
+ int delta = TII->GetInstSizeInBytes(&MBB->back());
+ BBInfo[MBB->getNumber()].Size -= delta;
+ MBB->back().eraseFromParent();
+ // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
+ }
+ MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
+
+ DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
+ << " also invert condition and change dest. to BB#"
+ << NextBB->getNumber() << "\n");
+
+ // Insert a new conditional branch and a new unconditional branch.
+ // Also update the ImmBranch as well as adding a new entry for the new branch.
+ BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
+ .addMBB(NextBB).addImm(CC).addReg(CCReg);
+ Br.MI = &MBB->back();
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
+ BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
+ unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
+ ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
+
+ // Remove the old conditional branch. It may or may not still be in MBB.
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
+ MI->eraseFromParent();
+ adjustBBOffsetsAfter(MBB);
+ return true;
+}
+
+
+void MipsConstantIslands::prescanForConstants() {
+ unsigned J = 0;
+ (void)J;
+ PrescannedForConstants = true;
+ for (MachineFunction::iterator B =
+ MF->begin(), E = MF->end(); B != E; ++B) {
+ for (MachineBasicBlock::instr_iterator I =
+ B->instr_begin(), EB = B->instr_end(); I != EB; ++I) {
+ switch(I->getDesc().getOpcode()) {
+ case Mips::LwConstant32: {
+ DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ J = I->getNumOperands();
+ DEBUG(dbgs() << "num operands " << J << "\n");
+ MachineOperand& Literal = I->getOperand(1);
+ if (Literal.isImm()) {
+ int64_t V = Literal.getImm();
+ DEBUG(dbgs() << "literal " << V << "\n");
+ Type *Int32Ty =
+ Type::getInt32Ty(MF->getFunction()->getContext());
+ const Constant *C = ConstantInt::get(Int32Ty, V);
+ unsigned index = MCP->getConstantPoolIndex(C, 4);
+ I->getOperand(2).ChangeToImmediate(index);
+ DEBUG(dbgs() << "constant island constant " << *I << "\n");
+ I->setDesc(TII->get(Mips::LwRxPcTcp16));
+ I->RemoveOperand(1);
+ I->RemoveOperand(1);
+ I->addOperand(MachineOperand::CreateCPI(index, 0));
+ I->addOperand(MachineOperand::CreateImm(4));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+}
+
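The prescan above boils down to pooling 32-bit literals: each LwConstant32 pseudo has its immediate entered into the function's constant pool (duplicates can share an entry) and the instruction is rewritten into a short pc-relative load of that pool entry (LwRxPcTcp16). The sketch below shows only the pooling/indexing step in plain C++; the ConstantPool type and its names are invented for illustration and are not the LLVM MachineConstantPool API.

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Toy constant pool: each distinct 32-bit literal gets one slot; every use
// is rewritten to refer to the slot's index instead of the raw immediate.
struct ConstantPool {
  std::vector<int32_t> Slots;
  std::map<int32_t, unsigned> Index;

  unsigned getOrCreateIndex(int32_t V) {
    std::map<int32_t, unsigned>::iterator It = Index.find(V);
    if (It != Index.end())
      return It->second;                 // literal already pooled
    unsigned Idx = static_cast<unsigned>(Slots.size());
    Slots.push_back(V);
    Index[V] = Idx;
    return Idx;
  }
};

int main() {
  ConstantPool CP;
  const int32_t Literals[] = { 0x12345678, 42, 0x12345678 };
  for (unsigned I = 0; I < 3; ++I)
    std::cout << "literal " << Literals[I] << " -> pool index "
              << CP.getOrCreateIndex(Literals[I]) << "\n";
  // The pool itself would later be emitted as an island of data placed
  // within reach of a short pc-relative load (cf. LwRxPcTcp16 above).
  return 0;
}
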
diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td
index c12878a95209..d26838404451 100644
--- a/lib/Target/Mips/MipsDSPInstrInfo.td
+++ b/lib/Target/Mips/MipsDSPInstrInfo.td
@@ -256,236 +256,235 @@ class PREPEND_ENC : APPEND_FMT<0b00001>;
// Instruction desc.
class ADDU_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS, RegisterClass RCT = RCS> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS, RegisterOperand ROT = ROS> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class RADDU_W_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs))];
InstrItinClass Itinerary = itin;
}
class CMP_EQ_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCS,
- RegisterClass RCT = RCS> {
+ InstrItinClass itin, RegisterOperand ROS,
+ RegisterOperand ROT = ROS> {
dag OutOperandList = (outs);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rs, $rt");
- list<dag> Pattern = [(OpNode RCS:$rs, RCT:$rt)];
+ list<dag> Pattern = [(OpNode ROS:$rs, ROT:$rt)];
InstrItinClass Itinerary = itin;
}
class CMP_EQ_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS, RegisterClass RCT = RCS> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS, RegisterOperand ROT = ROS> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class PRECR_SRA_PH_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCT,
- RegisterClass RCS = RCT> {
- dag OutOperandList = (outs RCT:$rt);
- dag InOperandList = (ins RCS:$rs, shamt:$sa, RCS:$src);
+ InstrItinClass itin, RegisterOperand ROT,
+ RegisterOperand ROS = ROT> {
+ dag OutOperandList = (outs ROT:$rt);
+ dag InOperandList = (ins ROS:$rs, uimm5:$sa, ROS:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set RCT:$rt, (OpNode RCS:$src, RCS:$rs, immZExt5:$sa))];
+ list<dag> Pattern = [(set ROT:$rt, (OpNode ROS:$src, ROS:$rs, immZExt5:$sa))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
class ABSQ_S_PH_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCT = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROT = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class REPL_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- ImmLeaf immPat, InstrItinClass itin, RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
+ ImmLeaf immPat, InstrItinClass itin, RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
dag InOperandList = (ins uimm16:$imm);
string AsmString = !strconcat(instr_asm, "\t$rd, $imm");
- list<dag> Pattern = [(set RC:$rd, (OpNode immPat:$imm))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode immPat:$imm))];
InstrItinClass Itinerary = itin;
}
class SHLL_QB_R3_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
- dag InOperandList = (ins RC:$rt, CPURegs:$rs_sa);
+ InstrItinClass itin, RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
+ dag InOperandList = (ins RO:$rt, GPR32Opnd:$rs_sa);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
- list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs_sa))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs_sa))];
InstrItinClass Itinerary = itin;
}
class SHLL_QB_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
SDPatternOperator ImmPat, InstrItinClass itin,
- RegisterClass RC> {
- dag OutOperandList = (outs RC:$rd);
- dag InOperandList = (ins RC:$rt, uimm16:$rs_sa);
+ RegisterOperand RO> {
+ dag OutOperandList = (outs RO:$rd);
+ dag InOperandList = (ins RO:$rt, uimm16:$rs_sa);
string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa");
- list<dag> Pattern = [(set RC:$rd, (OpNode RC:$rt, ImmPat:$rs_sa))];
+ list<dag> Pattern = [(set RO:$rd, (OpNode RO:$rt, ImmPat:$rs_sa))];
InstrItinClass Itinerary = itin;
bit hasSideEffects = 1;
}
class LX_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
- dag InOperandList = (ins CPURegs:$base, CPURegs:$index);
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins PtrRC:$base, PtrRC:$index);
string AsmString = !strconcat(instr_asm, "\t$rd, ${index}(${base})");
- list<dag> Pattern = [(set CPURegs:$rd,
- (OpNode CPURegs:$base, CPURegs:$index))];
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode iPTR:$base, iPTR:$index))];
InstrItinClass Itinerary = itin;
bit mayLoad = 1;
}
class ADDUH_QB_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
- InstrItinClass itin, RegisterClass RCD,
- RegisterClass RCS = RCD, RegisterClass RCT = RCD> {
- dag OutOperandList = (outs RCD:$rd);
- dag InOperandList = (ins RCS:$rs, RCT:$rt);
+ InstrItinClass itin, RegisterOperand ROD,
+ RegisterOperand ROS = ROD, RegisterOperand ROT = ROD> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROS:$rs, ROT:$rt);
string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt");
- list<dag> Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))];
+ list<dag> Pattern = [(set ROD:$rd, (OpNode ROS:$rs, ROT:$rt))];
InstrItinClass Itinerary = itin;
}
class APPEND_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
SDPatternOperator ImmOp, InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins CPURegs:$rs, shamt:$sa, CPURegs:$src);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm5:$sa, GPR32Opnd:$src);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa");
- list<dag> Pattern = [(set CPURegs:$rt,
- (OpNode CPURegs:$src, CPURegs:$rs, ImmOp:$sa))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt,
+ (OpNode GPR32Opnd:$src, GPR32Opnd:$rs, ImmOp:$sa))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
class EXTR_W_TY1_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins ACRegsDSP:$ac, CPURegs:$shift_rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins ACC64DSPOpnd:$ac, GPR32Opnd:$shift_rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
InstrItinClass Itinerary = itin;
}
class EXTR_W_TY1_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins ACRegsDSP:$ac, uimm16:$shift_rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins ACC64DSPOpnd:$ac, uimm16:$shift_rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs");
InstrItinClass Itinerary = itin;
}
class SHILO_R1_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins simm16:$shift, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins simm16:$shift, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $shift");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode immSExt6:$shift, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode immSExt6:$shift, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class SHILO_R2_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class MTHLIP_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$rs, $ac");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class RDDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
+ dag OutOperandList = (outs GPR32Opnd:$rd);
dag InOperandList = (ins uimm16:$mask);
string AsmString = !strconcat(instr_asm, "\t$rd, $mask");
- list<dag> Pattern = [(set CPURegs:$rd, (OpNode immZExt10:$mask))];
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode immZExt10:$mask))];
InstrItinClass Itinerary = itin;
}
class WRDSP_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
dag OutOperandList = (outs);
- dag InOperandList = (ins CPURegs:$rs, uimm16:$mask);
+ dag InOperandList = (ins GPR32Opnd:$rs, uimm16:$mask);
string AsmString = !strconcat(instr_asm, "\t$rs, $mask");
- list<dag> Pattern = [(OpNode CPURegs:$rs, immZExt10:$mask)];
+ list<dag> Pattern = [(OpNode GPR32Opnd:$rs, immZExt10:$mask)];
InstrItinClass Itinerary = itin;
}
class DPA_W_PH_DESC_BASE<string instr_asm, SDPatternOperator OpNode> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin))];
string Constraints = "$acin = $ac";
}
class MULT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac, (OpNode CPURegs:$rs, CPURegs:$rt))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac, (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt))];
InstrItinClass Itinerary = itin;
- int AddedComplexity = 20;
bit isCommutable = 1;
}
class MADD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs ACRegsDSP:$ac);
- dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin);
+ dag OutOperandList = (outs ACC64DSPOpnd:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin);
string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt");
- list<dag> Pattern = [(set ACRegsDSP:$ac,
- (OpNode CPURegs:$rs, CPURegs:$rt, ACRegsDSP:$acin))];
+ list<dag> Pattern = [(set ACC64DSPOpnd:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64DSPOpnd:$acin))];
InstrItinClass Itinerary = itin;
- int AddedComplexity = 20;
string Constraints = "$acin = $ac";
}
-class MFHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rd);
- dag InOperandList = (ins RC:$ac);
+class MFHI_DESC_BASE<string instr_asm, RegisterOperand RO, SDNode OpNode,
+ InstrItinClass itin> {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins RO:$ac);
string AsmString = !strconcat(instr_asm, "\t$rd, $ac");
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (OpNode RO:$ac))];
InstrItinClass Itinerary = itin;
}
-class MTHI_DESC_BASE<string instr_asm, RegisterClass RC, InstrItinClass itin> {
- dag OutOperandList = (outs RC:$ac);
- dag InOperandList = (ins CPURegs:$rs);
+class MTHI_DESC_BASE<string instr_asm, RegisterOperand RO, InstrItinClass itin> {
+ dag OutOperandList = (outs RO:$ac);
+ dag InOperandList = (ins GPR32Opnd:$rs);
string AsmString = !strconcat(instr_asm, "\t$rs, $ac");
InstrItinClass Itinerary = itin;
}
class BPOSGE32_PSEUDO_DESC_BASE<SDPatternOperator OpNode, InstrItinClass itin> :
- MipsPseudo<(outs CPURegs:$dst), (ins), [(set CPURegs:$dst, (OpNode))]> {
+ MipsPseudo<(outs GPR32Opnd:$dst), (ins), [(set GPR32Opnd:$dst, (OpNode))]> {
bit usesCustomInserter = 1;
}
@@ -501,10 +500,10 @@ class BPOSGE32_DESC_BASE<string instr_asm, InstrItinClass itin> {
class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
InstrItinClass itin> {
- dag OutOperandList = (outs CPURegs:$rt);
- dag InOperandList = (ins CPURegs:$src, CPURegs:$rs);
+ dag OutOperandList = (outs GPR32Opnd:$rt);
+ dag InOperandList = (ins GPR32Opnd:$src, GPR32Opnd:$rs);
string AsmString = !strconcat(instr_asm, "\t$rt, $rs");
- list<dag> Pattern = [(set CPURegs:$rt, (OpNode CPURegs:$src, CPURegs:$rs))];
+ list<dag> Pattern = [(set GPR32Opnd:$rt, (OpNode GPR32Opnd:$src, GPR32Opnd:$rs))];
InstrItinClass Itinerary = itin;
string Constraints = "$src = $rt";
}
@@ -515,209 +514,209 @@ class INSV_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
// Addition/subtraction
class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", null_frag, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDU_S_QB_DESC : ADDU_QB_DESC_BASE<"addu_s.qb", int_mips_addu_s_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", null_frag, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBU_S_QB_DESC : ADDU_QB_DESC_BASE<"subu_s.qb", int_mips_subu_s_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", null_frag, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDQ_S_PH_DESC : ADDU_QB_DESC_BASE<"addq_s.ph", int_mips_addq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", null_frag, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w,
- NoItinerary, CPURegs, CPURegs>,
+ NoItinerary, GPR32Opnd, GPR32Opnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w,
- NoItinerary, CPURegs, CPURegs>,
+ NoItinerary, GPR32Opnd, GPR32Opnd>,
Defs<[DSPOutFlag20]>;
class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", null_frag, NoItinerary,
- CPURegs, CPURegs>, IsCommutable,
+ GPR32Opnd, GPR32Opnd>, IsCommutable,
Defs<[DSPCarry]>;
class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", null_frag, NoItinerary,
- CPURegs, CPURegs>,
+ GPR32Opnd, GPR32Opnd>,
IsCommutable, Uses<[DSPCarry]>, Defs<[DSPOutFlag20]>;
class MODSUB_DESC : ADDU_QB_DESC_BASE<"modsub", int_mips_modsub, NoItinerary,
- CPURegs, CPURegs>;
+ GPR32Opnd, GPR32Opnd>;
class RADDU_W_QB_DESC : RADDU_W_QB_DESC_BASE<"raddu.w.qb", int_mips_raddu_w_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
// Absolute value
class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ABSQ_S_W_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.w", int_mips_absq_s_w,
- NoItinerary, CPURegs>,
+ NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag20]>;
// Precision reduce/expand
class PRECRQ_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.qb.ph",
int_mips_precrq_qb_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class PRECRQ_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.ph.w",
int_mips_precrq_ph_w,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class PRECRQ_RS_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq_rs.ph.w",
int_mips_precrq_rs_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>,
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph",
int_mips_precrqu_s_qb_ph,
- NoItinerary, DSPRegs,
- DSPRegs>,
+ NoItinerary, DSPROpnd,
+ DSPROpnd>,
Defs<[DSPOutFlag22]>;
class PRECEQ_W_PHL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phl",
int_mips_preceq_w_phl,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class PRECEQ_W_PHR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phr",
int_mips_preceq_w_phr,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class PRECEQU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbl",
int_mips_precequ_ph_qbl,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbr",
int_mips_precequ_ph_qbr,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbla",
int_mips_precequ_ph_qbla,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEQU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbra",
int_mips_precequ_ph_qbra,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbl",
int_mips_preceu_ph_qbl,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbr",
int_mips_preceu_ph_qbr,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbla",
int_mips_preceu_ph_qbla,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class PRECEU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbra",
int_mips_preceu_ph_qbra,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
// Shift
class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shllv.qb", int_mips_shll_qb,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.qb", int_mips_shrl_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv.ph", int_mips_shll_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLL_S_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.ph", int_mips_shll_s_ph,
- immZExt4, NoItinerary, DSPRegs>,
+ immZExt4, NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_S_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.ph", int_mips_shll_s_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag22]>;
class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRAV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav.ph", int_mips_shra_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRA_R_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.ph", int_mips_shra_r_ph,
- immZExt4, NoItinerary, DSPRegs>;
+ immZExt4, NoItinerary, DSPROpnd>;
class SHRAV_R_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.ph", int_mips_shra_r_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHLL_S_W_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.w", int_mips_shll_s_w,
- immZExt5, NoItinerary, CPURegs>,
+ immZExt5, NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class SHLLV_S_W_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.w", int_mips_shll_s_w,
- NoItinerary, CPURegs>,
+ NoItinerary, GPR32Opnd>,
Defs<[DSPOutFlag22]>;
class SHRA_R_W_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.w", int_mips_shra_r_w,
- immZExt5, NoItinerary, CPURegs>;
+ immZExt5, NoItinerary, GPR32Opnd>;
class SHRAV_R_W_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.w", int_mips_shra_r_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
// Multiplication
class MULEU_S_PH_QBL_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbl",
int_mips_muleu_s_ph_qbl,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag21]>;
class MULEU_S_PH_QBR_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbr",
int_mips_muleu_s_ph_qbr,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag21]>;
class MULEQ_S_W_PHL_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phl",
int_mips_muleq_s_w_phl,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULEQ_S_W_PHR_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phr",
int_mips_muleq_s_w_phr,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
class MULSAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph",
@@ -737,10 +736,10 @@ class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr", MipsMAQ_SA_W_PHR>,
Defs<[DSPOutFlag16_19]>;
// Move from/to hi/lo.
-class MFHI_DESC : MFHI_DESC_BASE<"mfhi", HIRegsDSP, NoItinerary>;
-class MFLO_DESC : MFHI_DESC_BASE<"mflo", LORegsDSP, NoItinerary>;
-class MTHI_DESC : MTHI_DESC_BASE<"mthi", HIRegsDSP, NoItinerary>;
-class MTLO_DESC : MTHI_DESC_BASE<"mtlo", LORegsDSP, NoItinerary>;
+class MFHI_DESC : MFHI_DESC_BASE<"mfhi", ACC64DSPOpnd, MipsMFHI, NoItinerary>;
+class MFLO_DESC : MFHI_DESC_BASE<"mflo", ACC64DSPOpnd, MipsMFLO, NoItinerary>;
+class MTHI_DESC : MTHI_DESC_BASE<"mthi", HI32DSPOpnd, NoItinerary>;
+class MTLO_DESC : MTHI_DESC_BASE<"mtlo", LO32DSPOpnd, NoItinerary>;
// Dot product with accumulate/subtract
class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl", MipsDPAU_H_QBL>;
@@ -773,67 +772,67 @@ class MSUBU_DSP_DESC : MADD_DESC_BASE<"msubu", MipsMSubu, NoItinerary>;
// Comparison
class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb",
int_mips_cmpu_eq_qb, NoItinerary,
- DSPRegs>,
+ DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMPU_LT_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.lt.qb",
int_mips_cmpu_lt_qb, NoItinerary,
- DSPRegs>, Defs<[DSPCCond]>;
+ DSPROpnd>, Defs<[DSPCCond]>;
class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb",
int_mips_cmpu_le_qb, NoItinerary,
- DSPRegs>, Defs<[DSPCCond]>;
+ DSPROpnd>, Defs<[DSPCCond]>;
class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb",
int_mips_cmpgu_eq_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable;
class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb",
int_mips_cmpgu_lt_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb",
int_mips_cmpgu_le_qb,
- NoItinerary, CPURegs, DSPRegs>;
+ NoItinerary, GPR32Opnd, DSPROpnd>;
class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMP_LT_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.lt.ph", int_mips_cmp_lt_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPCCond]>;
class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPCCond]>;
// Misc
class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, immZExt8,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class REPLV_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.ph", int_mips_repl_ph,
- NoItinerary, DSPRegs, CPURegs>;
+ NoItinerary, DSPROpnd, GPR32Opnd>;
class PICK_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.qb", int_mips_pick_qb,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Uses<[DSPCCond]>;
class PICK_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.ph", int_mips_pick_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Uses<[DSPCCond]>;
class LWX_DESC : LX_DESC_BASE<"lwx", int_mips_lwx, NoItinerary>;
@@ -905,97 +904,97 @@ class INSV_DESC : INSV_DESC_BASE<"insv", int_mips_insv, NoItinerary>,
// MIPS DSP Rev 2
// Addition/subtraction
class ADDU_PH_DESC : ADDU_QB_DESC_BASE<"addu.ph", int_mips_addu_ph, NoItinerary,
- DSPRegs, DSPRegs>, IsCommutable,
+ DSPROpnd, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag20]>;
class ADDU_S_PH_DESC : ADDU_QB_DESC_BASE<"addu_s.ph", int_mips_addu_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag20]>;
class SUBU_PH_DESC : ADDU_QB_DESC_BASE<"subu.ph", int_mips_subu_ph, NoItinerary,
- DSPRegs, DSPRegs>,
+ DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class SUBU_S_PH_DESC : ADDU_QB_DESC_BASE<"subu_s.ph", int_mips_subu_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
Defs<[DSPOutFlag20]>;
class ADDUH_QB_DESC : ADDUH_QB_DESC_BASE<"adduh.qb", int_mips_adduh_qb,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class ADDUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"adduh_r.qb", int_mips_adduh_r_qb,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class SUBUH_QB_DESC : ADDUH_QB_DESC_BASE<"subuh.qb", int_mips_subuh_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SUBUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"subuh_r.qb", int_mips_subuh_r_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class ADDQH_PH_DESC : ADDUH_QB_DESC_BASE<"addqh.ph", int_mips_addqh_ph,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class ADDQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"addqh_r.ph", int_mips_addqh_r_ph,
- NoItinerary, DSPRegs>, IsCommutable;
+ NoItinerary, DSPROpnd>, IsCommutable;
class SUBQH_PH_DESC : ADDUH_QB_DESC_BASE<"subqh.ph", int_mips_subqh_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SUBQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"subqh_r.ph", int_mips_subqh_r_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class ADDQH_W_DESC : ADDUH_QB_DESC_BASE<"addqh.w", int_mips_addqh_w,
- NoItinerary, CPURegs>, IsCommutable;
+ NoItinerary, GPR32Opnd>, IsCommutable;
class ADDQH_R_W_DESC : ADDUH_QB_DESC_BASE<"addqh_r.w", int_mips_addqh_r_w,
- NoItinerary, CPURegs>, IsCommutable;
+ NoItinerary, GPR32Opnd>, IsCommutable;
class SUBQH_W_DESC : ADDUH_QB_DESC_BASE<"subqh.w", int_mips_subqh_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
class SUBQH_R_W_DESC : ADDUH_QB_DESC_BASE<"subqh_r.w", int_mips_subqh_r_w,
- NoItinerary, CPURegs>;
+ NoItinerary, GPR32Opnd>;
// Comparison
class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb",
int_mips_cmpgdu_eq_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
IsCommutable, Defs<[DSPCCond]>;
class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb",
int_mips_cmpgdu_lt_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
Defs<[DSPCCond]>;
class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb",
int_mips_cmpgdu_le_qb,
- NoItinerary, CPURegs, DSPRegs>,
+ NoItinerary, GPR32Opnd, DSPROpnd>,
Defs<[DSPCCond]>;
// Absolute
class ABSQ_S_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.qb", int_mips_absq_s_qb,
- NoItinerary, DSPRegs>,
+ NoItinerary, DSPROpnd>,
Defs<[DSPOutFlag20]>;
// Multiplication
class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", null_frag, NoItinerary,
- DSPRegs>, IsCommutable,
+ DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph,
- NoItinerary, DSPRegs>, IsCommutable,
+ NoItinerary, DSPROpnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_S_W_DESC : ADDUH_QB_DESC_BASE<"mulq_s.w", int_mips_mulq_s_w,
- NoItinerary, CPURegs>, IsCommutable,
+ NoItinerary, GPR32Opnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_RS_W_DESC : ADDUH_QB_DESC_BASE<"mulq_rs.w", int_mips_mulq_rs_w,
- NoItinerary, CPURegs>, IsCommutable,
+ NoItinerary, GPR32Opnd>, IsCommutable,
Defs<[DSPOutFlag21]>;
class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph,
- NoItinerary, DSPRegs, DSPRegs>,
+ NoItinerary, DSPROpnd, DSPROpnd>,
IsCommutable, Defs<[DSPOutFlag21]>;
// Dot product with accumulate/subtract
@@ -1026,36 +1025,36 @@ class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph", MipsMULSA_W_PH>;
// Precision reduce/expand
class PRECR_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph",
int_mips_precr_qb_ph,
- NoItinerary, DSPRegs, DSPRegs>;
+ NoItinerary, DSPROpnd, DSPROpnd>;
class PRECR_SRA_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra.ph.w",
int_mips_precr_sra_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>;
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>;
class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w",
int_mips_precr_sra_r_ph_w,
- NoItinerary, DSPRegs,
- CPURegs>;
+ NoItinerary, DSPROpnd,
+ GPR32Opnd>;
// Shift
class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", null_frag, immZExt3,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRAV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav.qb", int_mips_shra_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRA_R_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.qb", int_mips_shra_r_qb,
- immZExt3, NoItinerary, DSPRegs>;
+ immZExt3, NoItinerary, DSPROpnd>;
class SHRAV_R_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.qb", int_mips_shra_r_qb,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", null_frag, immZExt4,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph,
- NoItinerary, DSPRegs>;
+ NoItinerary, DSPROpnd>;
// Misc
class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, immZExt5,
@@ -1240,24 +1239,24 @@ def PREPEND : PREPEND_ENC, PREPEND_DESC;
}
// Pseudos.
-let isPseudo = 1 in {
+let isPseudo = 1, isCodeGenOnly = 1 in {
// Pseudo instructions for loading and storing accumulator registers.
- defm LOAD_AC_DSP : LoadM<"load_ac_dsp", ACRegsDSP>;
- defm STORE_AC_DSP : StoreM<"store_ac_dsp", ACRegsDSP>;
+ def LOAD_ACC64DSP : Load<"", ACC64DSPOpnd>;
+ def STORE_ACC64DSP : Store<"", ACC64DSPOpnd>;
// Pseudos for loading and storing ccond field of DSP control register.
- defm LOAD_CCOND_DSP : LoadM<"load_ccond_dsp", DSPCC>;
- defm STORE_CCOND_DSP : StoreM<"store_ccond_dsp", DSPCC>;
+ def LOAD_CCOND_DSP : Load<"load_ccond_dsp", DSPCC>;
+ def STORE_CCOND_DSP : Store<"store_ccond_dsp", DSPCC>;
}
// Pseudo CMP and PICK instructions.
class PseudoCMP<Instruction RealInst> :
- PseudoDSP<(outs DSPCC:$cmp), (ins DSPRegs:$rs, DSPRegs:$rt), []>,
- PseudoInstExpansion<(RealInst DSPRegs:$rs, DSPRegs:$rt)>, NeverHasSideEffects;
+ PseudoDSP<(outs DSPCC:$cmp), (ins DSPROpnd:$rs, DSPROpnd:$rt), []>,
+ PseudoInstExpansion<(RealInst DSPROpnd:$rs, DSPROpnd:$rt)>, NeverHasSideEffects;
class PseudoPICK<Instruction RealInst> :
- PseudoDSP<(outs DSPRegs:$rd), (ins DSPCC:$cmp, DSPRegs:$rs, DSPRegs:$rt), []>,
- PseudoInstExpansion<(RealInst DSPRegs:$rd, DSPRegs:$rs, DSPRegs:$rt)>,
+ PseudoDSP<(outs DSPROpnd:$rd), (ins DSPCC:$cmp, DSPROpnd:$rs, DSPROpnd:$rt), []>,
+ PseudoInstExpansion<(RealInst DSPROpnd:$rd, DSPROpnd:$rs, DSPROpnd:$rt)>,
NeverHasSideEffects;
def PseudoCMP_EQ_PH : PseudoCMP<CMP_EQ_PH>;
@@ -1270,6 +1269,8 @@ def PseudoCMPU_LE_QB : PseudoCMP<CMPU_LE_QB>;
def PseudoPICK_PH : PseudoPICK<PICK_PH>;
def PseudoPICK_QB : PseudoPICK<PICK_QB>;
+def PseudoMTLOHI_DSP : PseudoMTLOHI<ACC64DSP, GPR32>;
+
// Patterns.
class DSPPat<dag pattern, dag result, Predicate pred = HasDSP> :
Pat<pattern, result>, Requires<[pred]>;
@@ -1279,19 +1280,19 @@ class BitconvertPat<ValueType DstVT, ValueType SrcVT, RegisterClass DstRC,
DSPPat<(DstVT (bitconvert (SrcVT SrcRC:$src))),
(COPY_TO_REGCLASS SrcRC:$src, DstRC)>;
-def : BitconvertPat<i32, v2i16, CPURegs, DSPRegs>;
-def : BitconvertPat<i32, v4i8, CPURegs, DSPRegs>;
-def : BitconvertPat<v2i16, i32, DSPRegs, CPURegs>;
-def : BitconvertPat<v4i8, i32, DSPRegs, CPURegs>;
+def : BitconvertPat<i32, v2i16, GPR32, DSPR>;
+def : BitconvertPat<i32, v4i8, GPR32, DSPR>;
+def : BitconvertPat<v2i16, i32, DSPR, GPR32>;
+def : BitconvertPat<v4i8, i32, DSPR, GPR32>;
def : DSPPat<(v2i16 (load addr:$a)),
- (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
+ (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPR))>;
def : DSPPat<(v4i8 (load addr:$a)),
- (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>;
-def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a),
- (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
-def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a),
- (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>;
+ (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPR))>;
+def : DSPPat<(store (v2i16 DSPR:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPR:$val, GPR32), addr:$a)>;
+def : DSPPat<(store (v4i8 DSPR:$val), addr:$a),
+ (SW (COPY_TO_REGCLASS DSPR:$val, GPR32), addr:$a)>;
// Binary operations.
class DSPBinPat<Instruction Inst, ValueType ValTy, SDPatternOperator Node,
@@ -1336,7 +1337,7 @@ class DSPSetCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy,
CondCode CC> :
DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)),
(ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)),
- (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs)),
+ (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPR)),
(ValTy ZERO)))>;
class DSPSetCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy,
@@ -1344,7 +1345,7 @@ class DSPSetCCPatInv<Instruction Cmp, Instruction Pick, ValueType ValTy,
DSPPat<(ValTy (MipsSETCC_DSP ValTy:$a, ValTy:$b, CC)),
(ValTy (Pick (ValTy (Cmp ValTy:$a, ValTy:$b)),
(ValTy ZERO),
- (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPRegs))))>;
+ (ValTy (COPY_TO_REGCLASS (ADDiu ZERO, -1), DSPR))))>;
class DSPSelectCCPat<Instruction Cmp, Instruction Pick, ValueType ValTy,
CondCode CC> :
@@ -1384,12 +1385,12 @@ def : DSPSelectCCPatInv<PseudoCMPU_LE_QB, PseudoPICK_QB, v4i8, SETUGT>;
// Extr patterns.
class EXTR_W_TY1_R2_Pat<SDPatternOperator OpNode, Instruction Instr> :
- DSPPat<(i32 (OpNode CPURegs:$rs, ACRegsDSP:$ac)),
- (Instr ACRegsDSP:$ac, CPURegs:$rs)>;
+ DSPPat<(i32 (OpNode GPR32:$rs, ACC64DSP:$ac)),
+ (Instr ACC64DSP:$ac, GPR32:$rs)>;
class EXTR_W_TY1_R1_Pat<SDPatternOperator OpNode, Instruction Instr> :
- DSPPat<(i32 (OpNode immZExt5:$shift, ACRegsDSP:$ac)),
- (Instr ACRegsDSP:$ac, immZExt5:$shift)>;
+ DSPPat<(i32 (OpNode immZExt5:$shift, ACC64DSP:$ac)),
+ (Instr ACC64DSP:$ac, immZExt5:$shift)>;
def : EXTR_W_TY1_R1_Pat<MipsEXTP, EXTP>;
def : EXTR_W_TY1_R2_Pat<MipsEXTP, EXTPV>;
@@ -1404,11 +1405,6 @@ def : EXTR_W_TY1_R2_Pat<MipsEXTR_RS_W, EXTRV_RS_W>;
def : EXTR_W_TY1_R1_Pat<MipsEXTR_S_H, EXTR_S_H>;
def : EXTR_W_TY1_R2_Pat<MipsEXTR_S_H, EXTRV_S_H>;
-// mflo/hi patterns.
-let AddedComplexity = 20 in
-def : DSPPat<(i32 (ExtractLOHI ACRegsDSP:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegsDSP:$ac, imm:$lohi_idx)>;
-
// Indexed load patterns.
class IndexedLoadPat<SDPatternOperator LoadNode, Instruction Instr> :
DSPPat<(i32 (LoadNode (add i32:$base, i32:$index))),
diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp
index d07a595af38a..ffbd83b1bbbf 100644
--- a/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -177,7 +177,7 @@ namespace {
class Filler : public MachineFunctionPass {
public:
Filler(TargetMachine &tm)
- : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { }
+ : MachineFunctionPass(ID), TM(tm) { }
virtual const char *getPassName() const {
return "Mips Delay Slot Filler";
@@ -243,7 +243,6 @@ namespace {
bool terminateSearch(const MachineInstr &Candidate) const;
TargetMachine &TM;
- const TargetInstrInfo *TII;
static char ID;
};
@@ -422,8 +421,7 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) {
return false;
if (const PseudoSourceValue *PSV = dyn_cast<const PseudoSourceValue>(V))
- return !PSV->PseudoSourceValue::isConstant(0) &&
- (V != PseudoSourceValue::getStack());
+ return !PSV->isConstant(0) && V != PseudoSourceValue::getStack();
return true;
}
@@ -438,7 +436,7 @@ bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
// Check underlying object list.
if (getUnderlyingObjects(MI, Objs)) {
- for (SmallVector<const Value *, 4>::const_iterator I = Objs.begin();
+ for (SmallVectorImpl<const Value *>::const_iterator I = Objs.begin();
I != Objs.end(); ++I)
HasHazard |= updateDefsUses(*I, MI.mayStore());
@@ -474,7 +472,7 @@ getUnderlyingObjects(const MachineInstr &MI,
SmallVector<Value *, 4> Objs;
GetUnderlyingObjects(const_cast<Value *>(V), Objs);
- for (SmallVector<Value*, 4>::iterator I = Objs.begin(), E = Objs.end();
+ for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), E = Objs.end();
I != E; ++I) {
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(*I)) {
if (PSV->isAliased(MFI))
@@ -514,6 +512,8 @@ bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
}
// Bundle the NOP to the instruction with the delay slot.
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
MIBundleBuilder(MBB, I, llvm::next(llvm::next(I)));
}
@@ -562,14 +562,13 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
RegDU.init(*Slot);
- if (searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler)) {
- MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
- ++UsefulSlots;
- return true;
- }
+ if (!searchRange(MBB, ReverseIter(Slot), MBB.rend(), RegDU, MemDU, Filler))
+ return false;
- return false;
+ MBB.splice(llvm::next(Slot), &MBB, llvm::next(Filler).base());
+ MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ ++UsefulSlots;
+ return true;
}
bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
@@ -583,14 +582,13 @@ bool Filler::searchForward(MachineBasicBlock &MBB, Iter Slot) const {
RegDU.setCallerSaved(*Slot);
- if (searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler)) {
- MBB.splice(llvm::next(Slot), &MBB, Filler);
- MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
- ++UsefulSlots;
- return true;
- }
+ if (!searchRange(MBB, llvm::next(Slot), MBB.end(), RegDU, NM, Filler))
+ return false;
- return false;
+ MBB.splice(llvm::next(Slot), &MBB, Filler);
+ MIBundleBuilder(MBB, Slot, llvm::next(llvm::next(Slot)));
+ ++UsefulSlots;
+ return true;
}
bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 968e5364842f..c417bd593413 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -57,7 +57,8 @@ bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
/// GOT address into a register.
SDNode *MipsDAGToDAGISel::getGlobalBaseReg() {
unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg();
- return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
+ return CurDAG->getRegister(GlobalBaseReg,
+ getTargetLowering()->getPointerTy()).getNode();
}
/// ComplexPattern used on MipsInstrInfo
@@ -68,6 +69,12 @@ bool MipsDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
llvm_unreachable("Unimplemented function.");
@@ -80,12 +87,83 @@ bool MipsDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
return false;
}
+bool MipsDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
bool MipsDAGToDAGISel::selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias) {
llvm_unreachable("Unimplemented function.");
return false;
}
+bool MipsDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm1(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm3(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm4(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm6(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimm8(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatMaskL(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
+bool MipsDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
+ llvm_unreachable("Unimplemented function.");
+ return false;
+}
+
/// Select instructions not customized! Used for
/// expanded, promoted and normal instructions
SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
@@ -97,6 +175,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we already have selected!
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
return NULL;
}
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index cf0f9c58aa9c..a4d9da532b2e 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -57,6 +57,11 @@ private:
virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ // Complex Pattern.
+ /// (reg + reg).
+ virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
/// Fall back on this function if all else fails.
virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
@@ -65,9 +70,42 @@ private:
virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectAddr16(SDNode *Parent, SDValue N, SDValue &Base,
SDValue &Offset, SDValue &Alias);
+ /// \brief Select constant vector splats.
+ virtual bool selectVSplat(SDNode *N, APInt &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm1.
+ virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm2.
+ virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm3.
+ virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm4.
+ virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm5.
+ virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm6.
+ virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm8.
+ virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a simm5.
+ virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a power of 2.
+ virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is the inverse of a
+ /// power of 2.
+ virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// ending at the most significant bit
+ virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// starting at bit zero.
+ virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+
virtual SDNode *Select(SDNode *N);
virtual std::pair<bool, SDNode*> selectNode(SDNode *Node) = 0;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 4d76181f9215..1e8250c847fe 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -20,6 +20,7 @@
#include "MipsTargetMachine.h"
#include "MipsTargetObjectFile.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -34,6 +35,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include <cctype>
using namespace llvm;
@@ -43,6 +45,11 @@ static cl::opt<bool>
LargeGOT("mxgot", cl::Hidden,
cl::desc("MIPS: Enable GOT larger than 64k."), cl::init(false));
+static cl::opt<bool>
+NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
+ cl::desc("MIPS: Don't trap on integer division by zero."),
+ cl::init(false));
+
static const uint16_t O32IntRegs[4] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
};
@@ -62,10 +69,10 @@ static const uint16_t Mips64DPRegs[8] = {
// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
if (!isShiftedMask_64(I))
- return false;
+ return false;
Size = CountPopulation_64(I);
- Pos = CountTrailingZeros_64(I);
+ Pos = countTrailingZeros(I);
return true;
}
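The comment above gives a worked example (I = 0x003ff800 yields Pos = 11, Size = 11). The small program below reproduces that computation with portable stand-ins for the bit-counting helpers; it is an illustration only, not the LLVM implementation.

#include <cstdint>
#include <iostream>

// Portable stand-ins for the bit utilities used above (llvm::countTrailingZeros
// and CountPopulation_64); written here purely for illustration.
static unsigned trailingZeros(uint64_t V) {
  unsigned N = 0;
  for (; (V & 1) == 0; V >>= 1)
    ++N;
  return N;
}

static unsigned popCount(uint64_t V) {
  unsigned N = 0;
  for (; V; V &= V - 1)
    ++N;
  return N;
}

// A shifted mask is a single contiguous run of ones: ((1 << Size) - 1) << Pos.
static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
  if (I == 0)
    return false;
  Pos = trailingZeros(I);
  Size = popCount(I);
  uint64_t Run = (Size == 64) ? ~0ULL : ((1ULL << Size) - 1);
  return (I >> Pos) == Run;
}

int main() {
  uint64_t Pos, Size;
  if (isShiftedMask(0x003ff800, Pos, Size))
    std::cout << "Pos = " << Pos << ", Size = " << Size << "\n"; // prints 11, 11
}
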
@@ -74,72 +81,35 @@ SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
}
-static SDValue getTargetNode(SDValue Op, SelectionDAG &DAG, unsigned Flag) {
- EVT Ty = Op.getValueType();
+SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
+}
- if (GlobalAddressSDNode *N = dyn_cast<GlobalAddressSDNode>(Op))
- return DAG.getTargetGlobalAddress(N->getGlobal(), Op.getDebugLoc(), Ty, 0,
- Flag);
- if (ExternalSymbolSDNode *N = dyn_cast<ExternalSymbolSDNode>(Op))
- return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
- if (BlockAddressSDNode *N = dyn_cast<BlockAddressSDNode>(Op))
- return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
- if (JumpTableSDNode *N = dyn_cast<JumpTableSDNode>(Op))
- return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
- if (ConstantPoolSDNode *N = dyn_cast<ConstantPoolSDNode>(Op))
- return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
- N->getOffset(), Flag);
-
- llvm_unreachable("Unexpected node type.");
- return SDValue();
+SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
}
-static SDValue getAddrNonPIC(SDValue Op, SelectionDAG &DAG) {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Hi = getTargetNode(Op, DAG, MipsII::MO_ABS_HI);
- SDValue Lo = getTargetNode(Op, DAG, MipsII::MO_ABS_LO);
- return DAG.getNode(ISD::ADD, DL, Ty,
- DAG.getNode(MipsISD::Hi, DL, Ty, Hi),
- DAG.getNode(MipsISD::Lo, DL, Ty, Lo));
+SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}
-SDValue MipsTargetLowering::getAddrLocal(SDValue Op, SelectionDAG &DAG,
- bool HasMips64) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
- SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
- getTargetNode(Op, DAG, GOTFlag));
- SDValue Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
- MachinePointerInfo::getGOT(), false, false, false,
- 0);
- unsigned LoFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
- SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty, getTargetNode(Op, DAG, LoFlag));
- return DAG.getNode(ISD::ADD, DL, Ty, Load, Lo);
-}
-
-SDValue MipsTargetLowering::getAddrGlobal(SDValue Op, SelectionDAG &DAG,
+SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
unsigned Flag) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
- getTargetNode(Op, DAG, Flag));
- return DAG.getLoad(Ty, DL, DAG.getEntryNode(), Tgt,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+ return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}
-SDValue MipsTargetLowering::getAddrGlobalLargeGOT(SDValue Op, SelectionDAG &DAG,
- unsigned HiFlag,
- unsigned LoFlag) const {
- DebugLoc DL = Op.getDebugLoc();
- EVT Ty = Op.getValueType();
- SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty, getTargetNode(Op, DAG, HiFlag));
- Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
- SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
- getTargetNode(Op, DAG, LoFlag));
- return DAG.getLoad(Ty, DL, DAG.getEntryNode(), Wrapper,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+ N->getOffset(), Flag);
}
const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -156,9 +126,10 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::FPCmp: return "MipsISD::FPCmp";
case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
- case MipsISD::FPRound: return "MipsISD::FPRound";
- case MipsISD::ExtractLOHI: return "MipsISD::ExtractLOHI";
- case MipsISD::InsertLOHI: return "MipsISD::InsertLOHI";
+ case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
+ case MipsISD::MFHI: return "MipsISD::MFHI";
+ case MipsISD::MFLO: return "MipsISD::MFLO";
+ case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
case MipsISD::Mult: return "MipsISD::Mult";
case MipsISD::Multu: return "MipsISD::Multu";
case MipsISD::MAdd: return "MipsISD::MAdd";
@@ -202,6 +173,30 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
+ case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
+ case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
+ case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
+ case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
+ case MipsISD::VCEQ: return "MipsISD::VCEQ";
+ case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
+ case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
+ case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
+ case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
+ case MipsISD::VSMAX: return "MipsISD::VSMAX";
+ case MipsISD::VSMIN: return "MipsISD::VSMIN";
+ case MipsISD::VUMAX: return "MipsISD::VUMAX";
+ case MipsISD::VUMIN: return "MipsISD::VUMIN";
+ case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
+ case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
+ case MipsISD::VNOR: return "MipsISD::VNOR";
+ case MipsISD::VSHF: return "MipsISD::VSHF";
+ case MipsISD::SHF: return "MipsISD::SHF";
+ case MipsISD::ILVEV: return "MipsISD::ILVEV";
+ case MipsISD::ILVOD: return "MipsISD::ILVOD";
+ case MipsISD::ILVL: return "MipsISD::ILVL";
+ case MipsISD::ILVR: return "MipsISD::ILVR";
+ case MipsISD::PCKEV: return "MipsISD::PCKEV";
+ case MipsISD::PCKOD: return "MipsISD::PCKOD";
default: return NULL;
}
}
@@ -250,6 +245,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
if (!TM.Options.NoNaNsFPMath) {
setOperationAction(ISD::FABS, MVT::f32, Custom);
@@ -265,6 +261,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::i64, Custom);
setOperationAction(ISD::LOAD, MVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
}
if (!HasMips64) {
@@ -339,11 +336,6 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::FNEG, MVT::f64, Expand);
}
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
- setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
-
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::Other, Expand);
@@ -383,6 +375,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setTruncStoreAction(MVT::i64, MVT::i32, Custom);
}
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
setTargetDAGCombine(ISD::SDIVREM);
setTargetDAGCombine(ISD::UDIVREM);
setTargetDAGCombine(ISD::SELECT);
@@ -407,7 +401,7 @@ const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM) {
return llvm::createMipsSETargetLowering(TM);
}
-EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
+EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
return MVT::i32;
return VT.changeVectorElementTypeToInteger();
@@ -420,11 +414,11 @@ static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT Ty = N->getValueType(0);
- unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
- unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
+ unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
+ unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
MipsISD::DivRemU16;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
N->getOperand(0), N->getOperand(1));
@@ -502,7 +496,7 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
return Op;
SDValue RHS = Op.getOperand(1);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
// node if necessary.
@@ -514,12 +508,13 @@ static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
// Creates and returns a CMovFPT/F node.
static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
- SDValue False, DebugLoc DL) {
+ SDValue False, SDLoc DL) {
ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
+ SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
- True.getValueType(), True, False, Cond);
+ True.getValueType(), True, FCC0, False, Cond);
}
static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
@@ -545,7 +540,7 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
if (!CN || CN->getZExtValue())
return SDValue();
- const DebugLoc DL = N->getDebugLoc();
+ const SDLoc DL(N);
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
SDValue True = N->getOperand(1);
@@ -561,7 +556,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
// Pattern match EXT.
// $dst = and ((sra or srl) $src , pos), (2**size - 1)
// => ext $dst, $src, size, pos
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
return SDValue();
SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
@@ -590,7 +585,7 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
return SDValue();
- return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy,
+ return DAG.getNode(MipsISD::Ext, SDLoc(N), ValTy,
ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
DAG.getConstant(SMSize, MVT::i32));
}
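
The AND-of-shift pattern matched above is the MIPS32r2 `ext` bitfield extract, which is why the check now asks for hasExtractInsert() rather than hasMips32r2(). The identity the combine relies on can be sketched with plain integers (illustration only, not part of the patch):

// Illustration only -- ext rt, rs, pos, size behaves as rt = (rs >> pos) & ((1 << size) - 1).
#include <cassert>
#include <cstdint>

static uint32_t extractBits(uint32_t Src, unsigned Pos, unsigned Size) {
  assert(Size > 0 && Size < 32 && Pos + Size <= 32);
  return (Src >> Pos) & ((1u << Size) - 1);
}

int main() {
  // and (srl $src, 4), 0xff  acts as  ext $dst, $src, 8, 4 (size 8, pos 4).
  assert(extractBits(0xDEADBEEFu, 4, 8) == 0xEE);
  return 0;
}
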
@@ -602,7 +597,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
- if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
+ if (DCI.isBeforeLegalizeOps() || !Subtarget->hasExtractInsert())
return SDValue();
SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
@@ -644,7 +639,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
return SDValue();
- return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
+ return DAG.getNode(MipsISD::Ins, SDLoc(N), ValTy, Shl.getOperand(0),
DAG.getConstant(SMPos0, MVT::i32),
DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}
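
The OR pattern above is the matching `ins` bitfield insert. A sketch of the same identity, again with plain integers and made-up values:

// Illustration only -- ins rt, rs, pos, size merges the low 'size' bits of rs into rt at 'pos'.
#include <cassert>
#include <cstdint>

static uint32_t insertBits(uint32_t Dst, uint32_t Src, unsigned Pos,
                           unsigned Size) {
  assert(Size > 0 && Size < 32 && Pos + Size <= 32);
  uint32_t Mask = ((1u << Size) - 1) << Pos;      // mask1 from the comment above
  return (Dst & ~Mask) | ((Src << Pos) & Mask);   // (dst & mask0) | shifted src
}

int main() {
  // or (and $src1, 0xffff00ff), (and (shl $src, 8), 0xff00)
  assert(insertBits(0x11223344u, 0xABu, 8, 8) == 0x1122AB44u);
  return 0;
}
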
@@ -669,7 +664,7 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
EVT ValTy = N->getValueType(0);
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
Add.getOperand(0));
@@ -744,6 +739,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::LOAD: return lowerLOAD(Op, DAG);
case ISD::STORE: return lowerSTORE(Op, DAG);
case ISD::ADD: return lowerADD(Op, DAG);
+ case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
}
return SDValue();
}
@@ -763,6 +759,30 @@ addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
return VReg;
}
+static MachineBasicBlock *expandPseudoDIV(MachineInstr *MI,
+ MachineBasicBlock &MBB,
+ const TargetInstrInfo &TII,
+ bool Is64Bit) {
+ if (NoZeroDivCheck)
+ return &MBB;
+
+ // Insert instruction "teq $divisor_reg, $zero, 7".
+ MachineBasicBlock::iterator I(MI);
+ MachineInstrBuilder MIB;
+ MachineOperand &Divisor = MI->getOperand(2);
+ MIB = BuildMI(MBB, llvm::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
+ .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
+ .addReg(Mips::ZERO).addImm(7);
+
+ // Use the 32-bit sub-register if this is a 64-bit division.
+ if (Is64Bit)
+ MIB->getOperand(0).setSubReg(Mips::sub_32);
+
+ // Clear Divisor's kill flag.
+ Divisor.setIsKill(false);
+ return &MBB;
+}
+
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -770,108 +790,82 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
default:
llvm_unreachable("Unexpected instr type to insert");
case Mips::ATOMIC_LOAD_ADD_I8:
- case Mips::ATOMIC_LOAD_ADD_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
- case Mips::ATOMIC_LOAD_ADD_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
- case Mips::ATOMIC_LOAD_ADD_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I64:
- case Mips::ATOMIC_LOAD_ADD_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
- case Mips::ATOMIC_LOAD_AND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
- case Mips::ATOMIC_LOAD_AND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
- case Mips::ATOMIC_LOAD_AND_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I64:
- case Mips::ATOMIC_LOAD_AND_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
- case Mips::ATOMIC_LOAD_OR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
- case Mips::ATOMIC_LOAD_OR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
- case Mips::ATOMIC_LOAD_OR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I64:
- case Mips::ATOMIC_LOAD_OR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
- case Mips::ATOMIC_LOAD_XOR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
- case Mips::ATOMIC_LOAD_XOR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
- case Mips::ATOMIC_LOAD_XOR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I64:
- case Mips::ATOMIC_LOAD_XOR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
- case Mips::ATOMIC_LOAD_NAND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
- case Mips::ATOMIC_LOAD_NAND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
- case Mips::ATOMIC_LOAD_NAND_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0, true);
case Mips::ATOMIC_LOAD_NAND_I64:
- case Mips::ATOMIC_LOAD_NAND_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
- case Mips::ATOMIC_LOAD_SUB_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
- case Mips::ATOMIC_LOAD_SUB_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
- case Mips::ATOMIC_LOAD_SUB_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I64:
- case Mips::ATOMIC_LOAD_SUB_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
- case Mips::ATOMIC_SWAP_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
- case Mips::ATOMIC_SWAP_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
- case Mips::ATOMIC_SWAP_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0);
case Mips::ATOMIC_SWAP_I64:
- case Mips::ATOMIC_SWAP_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
- case Mips::ATOMIC_CMP_SWAP_I8_P8:
return emitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
- case Mips::ATOMIC_CMP_SWAP_I16_P8:
return emitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
- case Mips::ATOMIC_CMP_SWAP_I32_P8:
return emitAtomicCmpSwap(MI, BB, 4);
case Mips::ATOMIC_CMP_SWAP_I64:
- case Mips::ATOMIC_CMP_SWAP_I64_P8:
return emitAtomicCmpSwap(MI, BB, 8);
+ case Mips::PseudoSDIV:
+ case Mips::PseudoUDIV:
+ return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), false);
+ case Mips::PseudoDSDIV:
+ case Mips::PseudoDUDIV:
+ return expandPseudoDIV(MI, *BB, *getTargetMachine().getInstrInfo(), true);
}
}
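
The new PseudoSDIV/PseudoUDIV (and 64-bit) cases call expandPseudoDIV, which appends `teq $divisor, $zero, 7` after the divide unless NoZeroDivCheck is set; MIPS integer divides do not fault on their own, so the trap is what supplies the divide-by-zero behaviour. A rough source-level analogy of what the guard provides (a sketch under that reading, not backend code):

// Illustration only: the effect of "div ...; teq $divisor, $zero, 7".
#include <csignal>
#include <cstdint>
#include <cstdio>

static int32_t checkedDiv(int32_t Num, int32_t Den) {
  if (Den == 0) {
    std::raise(SIGFPE); // the teq would take a trap exception (code 7) here
    return 0;           // not reached with the default SIGFPE action
  }
  return Num / Den;
}

int main() {
  std::printf("%d\n", checkedDiv(42, 6)); // prints 7
  return 0;
}
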
@@ -891,16 +885,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned LL, SC, AND, NOR, ZERO, BEQ;
if (Size == 4) {
- LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ LL = Mips::LL;
+ SC = Mips::SC;
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
BEQ = Mips::BEQ;
}
else {
- LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
- SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ LL = Mips::LLD;
+ SC = Mips::SCD;
AND = Mips::AND64;
NOR = Mips::NOR64;
ZERO = Mips::ZERO_64;
@@ -926,8 +920,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), BB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- BB->end());
+ llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
@@ -958,7 +951,7 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(BB, DL, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
BuildMI(BB, DL, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
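
emitAtomicBinary expands the 32/64-bit atomic pseudos into an ll/sc retry loop: load-linked, apply the operation, store-conditional, branch back if the store failed. The same retry shape expressed with std::atomic, offered purely as an analogy for the ADDu case:

// Illustration only -- the retry structure of:
//   loop: ll old, 0(ptr); addu new, old, incr; sc new, 0(ptr); beq new, $0, loop
#include <atomic>
#include <cassert>

static int atomicAdd(std::atomic<int> &Mem, int Incr) {
  int Old = Mem.load();
  while (!Mem.compare_exchange_weak(Old, Old + Incr))
    ; // "sc" failed or Old was stale; Old has been reloaded, try again
  return Old; // the pseudo's result is the value before the update
}

int main() {
  std::atomic<int> V(5);
  assert(atomicAdd(V, 3) == 5 && V.load() == 8);
  return 0;
}
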
@@ -969,15 +962,13 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
unsigned Size, unsigned BinOpcode,
bool Nand) const {
assert((Size == 1 || Size == 2) &&
- "Unsupported size for EmitAtomicBinaryPartial.");
+ "Unsupported size for EmitAtomicBinaryPartial.");
MachineFunction *MF = BB->getParent();
MachineRegisterInfo &RegInfo = MF->getRegInfo();
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1039,13 +1030,20 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ if (Subtarget->isLittle()) {
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ } else {
+ unsigned Off = RegInfo.createVirtualRegister(RC);
+ BuildMI(BB, DL, TII->get(Mips::XORi), Off)
+ .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
+ }
BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
.addReg(Mips::ZERO).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
- .addReg(ShiftAmt).addReg(MaskUpper);
+ .addReg(MaskUpper).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
- BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr);
+ BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
// atomic.load.binop
// loopMBB:
@@ -1067,7 +1065,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
- BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@@ -1081,7 +1079,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// and newval, binopres, mask
BuildMI(BB, DL, TII->get(BinOpcode), BinOpRes).addReg(OldVal).addReg(Incr2);
BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
- } else {// atomic.swap
+ } else { // atomic.swap
// and newval, incr2, mask
BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
}
@@ -1090,7 +1088,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
- BuildMI(BB, DL, TII->get(SC), Success)
+ BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
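
The partword (8/16-bit) expansion operates on the containing aligned word: the low pointer bits become a bit shift and a mask selects the element. This hunk adds the big-endian case, where the byte offset must be flipped with xori before scaling, and fixes the SLLV operand order (shifted value first, shift amount second). The address arithmetic in isolation, as a standalone sketch assuming a 32-bit word:

// Illustration only -- mirrors the AlignedAddr/ShiftAmt/Mask computation above.
#include <cassert>
#include <cstdint>

struct PartwordInfo {
  uint32_t AlignedAddr; // ptr & ~3
  unsigned ShiftAmt;    // bit position of the element within the word
  uint32_t Mask;        // element mask, already shifted into place
};

static PartwordInfo computePartword(uint32_t Ptr, unsigned Size, bool IsLittle) {
  assert(Size == 1 || Size == 2);
  uint32_t PtrLSB2 = Ptr & 3;                        // andi ptrlsb2, ptr, 3
  if (!IsLittle)
    PtrLSB2 ^= (Size == 1) ? 3 : 2;                  // xori (big-endian only)
  unsigned ShiftAmt = PtrLSB2 * 8;                   // sll shiftamt, ptrlsb2, 3
  uint32_t MaskUpper = (Size == 1) ? 0xFF : 0xFFFF;  // ori maskupper, $0, imm
  uint32_t Mask = MaskUpper << ShiftAmt;             // sllv mask, maskupper, shiftamt
  return { Ptr & ~3u, ShiftAmt, Mask };
}

int main() {
  // A byte at address ...5 sits at bits 8-15 little-endian, bits 16-23 big-endian.
  assert(computePartword(0x1005, 1, true).ShiftAmt == 8);
  assert(computePartword(0x1005, 1, false).ShiftAmt == 16);
  return 0;
}
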
@@ -1106,21 +1104,20 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
- .addReg(ShiftAmt).addReg(MaskedOldVal1);
+ .addReg(MaskedOldVal1).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
.addReg(SrlRes).addImm(ShiftImm);
BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
.addReg(SllRes).addImm(ShiftImm);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
-MachineBasicBlock *
-MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
+MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Size) const {
assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
MachineFunction *MF = BB->getParent();
@@ -1131,15 +1128,14 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
unsigned LL, SC, ZERO, BNE, BEQ;
if (Size == 4) {
- LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- SC = IsN64 ? Mips::SC_P8 : Mips::SC;
+ LL = Mips::LL;
+ SC = Mips::SC;
ZERO = Mips::ZERO;
BNE = Mips::BNE;
BEQ = Mips::BEQ;
- }
- else {
- LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
- SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
+ } else {
+ LL = Mips::LLD;
+ SC = Mips::SCD;
ZERO = Mips::ZERO_64;
BNE = Mips::BNE64;
BEQ = Mips::BEQ64;
@@ -1194,7 +1190,7 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
BuildMI(BB, DL, TII->get(BEQ))
.addReg(Success).addReg(ZERO).addMBB(loop1MBB);
- MI->eraseFromParent(); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return exitMBB;
}
@@ -1211,8 +1207,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
- unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@@ -1282,27 +1276,34 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
.addReg(Ptr).addReg(MaskLSB2);
BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
- BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ if (Subtarget->isLittle()) {
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
+ } else {
+ unsigned Off = RegInfo.createVirtualRegister(RC);
+ BuildMI(BB, DL, TII->get(Mips::XORi), Off)
+ .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
+ BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
+ }
BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
.addReg(Mips::ZERO).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
- .addReg(ShiftAmt).addReg(MaskUpper);
+ .addReg(MaskUpper).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
.addReg(CmpVal).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
- .addReg(ShiftAmt).addReg(MaskedCmpVal);
+ .addReg(MaskedCmpVal).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
.addReg(NewVal).addImm(MaskImm);
BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
- .addReg(ShiftAmt).addReg(MaskedNewVal);
+ .addReg(MaskedNewVal).addReg(ShiftAmt);
// loop1MBB:
// ll oldval,0(alignedaddr)
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
- BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
+ BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::BNE))
@@ -1318,7 +1319,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
- BuildMI(BB, DL, TII->get(SC), Success)
+ BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
@@ -1331,7 +1332,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
int64_t ShiftImm = (Size == 1) ? 24 : 16;
BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
- .addReg(ShiftAmt).addReg(MaskedOldVal0);
+ .addReg(MaskedOldVal0).addReg(ShiftAmt);
BuildMI(BB, DL, TII->get(Mips::SLL), SllRes)
.addReg(SrlRes).addImm(ShiftImm);
BuildMI(BB, DL, TII->get(Mips::SRA), Dest)
@@ -1349,7 +1350,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Table = Op.getOperand(1);
SDValue Index = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT PTy = getPointerTy();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
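
lowerBR_JT forms the address of the selected entry from the table base, the index, and the per-entry size reported by MachineJumpTableInfo, then loads it for the indirect branch. The addressing itself is plain scaled arithmetic; a toy sketch with an in-memory array standing in for the jump table:

// Illustration only -- base + index * entry size, then load the entry.
#include <cassert>
#include <cstdint>

static uintptr_t entryAddr(uintptr_t Table, uint64_t Index, unsigned EntrySize) {
  return Table + Index * EntrySize;
}

int main() {
  static const uint32_t Table[4] = {0x100, 0x200, 0x300, 0x400}; // 4-byte entries
  uintptr_t Addr = entryAddr(reinterpret_cast<uintptr_t>(&Table[0]), 2, 4);
  assert(*reinterpret_cast<const uint32_t *>(Addr) == 0x300);
  return 0;
}
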
@@ -1375,14 +1376,12 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::BRIND, DL, MVT::Other, Chain, Addr);
}
-SDValue MipsTargetLowering::
-lowerBRCOND(SDValue Op, SelectionDAG &DAG) const
-{
+SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
// The first operand is the chain, the second is the condition, the third is
// the block to branch to if the condition is true.
SDValue Chain = Op.getOperand(0);
SDValue Dest = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
@@ -1395,8 +1394,9 @@ lowerBRCOND(SDValue Op, SelectionDAG &DAG) const
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
SDValue BrCode = DAG.getConstant(Opc, MVT::i32);
+ SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
- Dest, CondRes);
+ FCC0, Dest, CondRes);
}
SDValue MipsTargetLowering::
@@ -1409,15 +1409,16 @@ lowerSELECT(SDValue Op, SelectionDAG &DAG) const
return Op;
return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
- Op.getDebugLoc());
+ SDLoc(Op));
}
SDValue MipsTargetLowering::
lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT Ty = Op.getOperand(0).getValueType();
- SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(Ty),
+ SDValue Cond = DAG.getNode(ISD::SETCC, DL,
+ getSetCCResultType(*DAG.getContext(), Ty),
Op.getOperand(0), Op.getOperand(1),
Op.getOperand(4));
@@ -1434,14 +1435,16 @@ SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32);
- return createCMovFP(DAG, Cond, True, False, Op.getDebugLoc());
+ return createCMovFP(DAG, Cond, True, False, SDLoc(Op));
}
SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
// FIXME there isn't actually debug info here
- DebugLoc DL = Op.getDebugLoc();
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ SDLoc DL(Op);
+ EVT Ty = Op.getValueType();
+ GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+ const GlobalValue *GV = N->getGlobal();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
const MipsTargetObjectFile &TLOF =
@@ -1458,26 +1461,31 @@ SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
}
// %hi/%lo relocation
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
}
if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
if (LargeGOT)
- return getAddrGlobalLargeGOT(Op, DAG, MipsII::MO_GOT_HI16,
- MipsII::MO_GOT_LO16);
+ return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
+ MipsII::MO_GOT_LO16, DAG.getEntryNode(),
+ MachinePointerInfo::getGOT());
- return getAddrGlobal(Op, DAG,
- HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16);
+ return getAddrGlobal(N, Ty, DAG,
+ HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16,
+ DAG.getEntryNode(), MachinePointerInfo::getGOT());
}
SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
+ BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+ EVT Ty = Op.getValueType();
+
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::
@@ -1488,7 +1496,7 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
// Local Exec TLS Model.
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- DebugLoc DL = GA->getDebugLoc();
+ SDLoc DL(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy();
@@ -1564,10 +1572,13 @@ lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering::
lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
+ JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+ EVT Ty = Op.getValueType();
+
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::
@@ -1582,18 +1593,20 @@ lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
+ ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
+ EVT Ty = Op.getValueType();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64)
- return getAddrNonPIC(Op, DAG);
+ return getAddrNonPIC(N, Ty, DAG);
- return getAddrLocal(Op, DAG, HasMips64);
+ return getAddrLocal(N, Ty, DAG, HasMips64);
}
SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
@@ -1604,12 +1617,13 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(SV), false, false, 0);
}
-static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
EVT TyX = Op.getOperand(0).getValueType();
EVT TyY = Op.getOperand(1).getValueType();
SDValue Const1 = DAG.getConstant(1, MVT::i32);
SDValue Const31 = DAG.getConstant(31, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Res;
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
@@ -1623,7 +1637,7 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
Const1);
- if (HasR2) {
+ if (HasExtractInsert) {
// ext E, Y, 31, 1 ; extract bit31 of Y
// ins X, E, 31, 1 ; insert extracted bit at bit31 of X
SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
@@ -1649,18 +1663,19 @@ static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
-static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
SDValue Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Bitcast to integer nodes.
SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
- if (HasR2) {
+ if (HasExtractInsert) {
// ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
// ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
@@ -1700,14 +1715,15 @@ static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
SDValue
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
if (Subtarget->hasMips64())
- return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFCOPYSIGN64(Op, DAG, Subtarget->hasExtractInsert());
- return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFCOPYSIGN32(Op, DAG, Subtarget->hasExtractInsert());
}
-static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
// to i32.
@@ -1717,7 +1733,7 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
Const1);
// Clear MSB.
- if (HasR2)
+ if (HasExtractInsert)
Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
DAG.getRegister(Mips::ZERO, MVT::i32),
DAG.getConstant(31, MVT::i32), Const1, X);
@@ -1734,15 +1750,16 @@ static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
}
-static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
+static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG,
+ bool HasExtractInsert) {
SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
// Bitcast to integer node.
SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
// Clear MSB.
- if (HasR2)
+ if (HasExtractInsert)
Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
DAG.getRegister(Mips::ZERO_64, MVT::i64),
DAG.getConstant(63, MVT::i32), Const1, X);
@@ -1757,9 +1774,9 @@ static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
SDValue
MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64))
- return lowerFABS64(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFABS64(Op, DAG, Subtarget->hasExtractInsert());
- return lowerFABS32(Op, DAG, Subtarget->hasMips32r2());
+ return lowerFABS32(Op, DAG, Subtarget->hasExtractInsert());
}
SDValue MipsTargetLowering::
@@ -1771,7 +1788,7 @@ lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
EVT VT = Op.getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
IsN64 ? Mips::FP_64 : Mips::FP, VT);
return FrameAddr;
@@ -1791,7 +1808,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
// Return RA, which contains the return address. Mark it an implicit live-in.
unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
- return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT);
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
@@ -1807,7 +1824,7 @@ SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
// Store stack offset in V1, store jump target in V0. Glue CopyToReg and
@@ -1827,14 +1844,14 @@ SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
// FIXME: Need pseudo-fence for 'singlethread' fences
// FIXME: Set SType for weaker fences where supported/appropriate.
unsigned SType = 0;
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
DAG.getConstant(SType, MVT::i32));
}
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
@@ -1865,7 +1882,7 @@ SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
bool IsSRA) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
SDValue Shamt = Op.getOperand(2);
@@ -1909,7 +1926,7 @@ static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
SDValue Ptr = LD->getBasePtr();
EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
EVT BasePtrVT = Ptr.getValueType();
- DebugLoc DL = LD->getDebugLoc();
+ SDLoc DL(LD);
SDVTList VTList = DAG.getVTList(VT, MVT::Other);
if (Offset)
@@ -1975,7 +1992,7 @@ SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// (set tmp1, (lwr baseptr, tmp0))
// (set tmp2, (shl tmp1, 32))
// (set dst, (srl tmp2, 32))
- DebugLoc DL = LD->getDebugLoc();
+ SDLoc DL(LD);
SDValue Const32 = DAG.getConstant(32, MVT::i32);
SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
@@ -1987,7 +2004,7 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
SDValue Chain, unsigned Offset) {
SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
- DebugLoc DL = SD->getDebugLoc();
+ SDLoc DL(SD);
SDVTList VTList = DAG.getVTList(MVT::Other);
if (Offset)
@@ -2000,16 +2017,8 @@ static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
}
// Expand an unaligned 32 or 64-bit integer store node.
-SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *SD = cast<StoreSDNode>(Op);
- EVT MemVT = SD->getMemoryVT();
-
- // Return if store is aligned or if MemVT is neither i32 nor i64.
- if ((SD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
- ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
- return SDValue();
-
- bool IsLittle = Subtarget->isLittle();
+static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
+ bool IsLittle) {
SDValue Value = SD->getValue(), Chain = SD->getChain();
EVT VT = Value.getValueType();
@@ -2036,6 +2045,34 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
}
+// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
+static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) {
+ SDValue Val = SD->getValue();
+
+ if (Val.getOpcode() != ISD::FP_TO_SINT)
+ return SDValue();
+
+ EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
+ SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
+ Val.getOperand(0));
+
+ return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
+ SD->getPointerInfo(), SD->isVolatile(),
+ SD->isNonTemporal(), SD->getAlignment());
+}
+
+SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *SD = cast<StoreSDNode>(Op);
+ EVT MemVT = SD->getMemoryVT();
+
+ // Lower unaligned integer stores.
+ if ((SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
+ ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
+ return lowerUnalignedIntStore(SD, DAG, Subtarget->isLittle());
+
+ return lowerFP_TO_SINT_STORE(SD, DAG);
+}
+
SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
if (Op->getOperand(0).getOpcode() != ISD::FRAMEADDR
|| cast<ConstantSDNode>
@@ -2053,10 +2090,18 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
EVT ValTy = Op->getValueType(0);
int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy);
- return DAG.getNode(ISD::ADD, Op->getDebugLoc(), ValTy, InArgsAddr,
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ValTy, InArgsAddr,
DAG.getConstant(0, ValTy));
}
+SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
+ SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
+ Op.getOperand(0));
+ return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
+}
+
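
Both the new lowerFP_TO_SINT and the store combine above wrap the source in MipsISD::TruncIntFP, which is matched to the trunc.w/trunc.l instructions; like ISD::FP_TO_SINT itself, those convert with rounding toward zero. A two-line reminder of that rounding mode (plain C++, nothing target-specific):

// Illustration only -- fp_to_sint truncates toward zero, it does not round to nearest.
#include <cassert>
#include <cstdint>

int main() {
  assert(static_cast<int32_t>(2.9f) == 2);
  assert(static_cast<int32_t>(-2.9f) == -2);
  return 0;
}
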
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -2076,21 +2121,14 @@ SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
//===----------------------------------------------------------------------===//
-static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
- MVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
+static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+ CCState &State, const uint16_t *F64Regs) {
- static const unsigned IntRegsSize=4, FloatRegsSize=2;
+ static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
- static const uint16_t IntRegs[] = {
- Mips::A0, Mips::A1, Mips::A2, Mips::A3
- };
- static const uint16_t F32Regs[] = {
- Mips::F12, Mips::F14
- };
- static const uint16_t F64Regs[] = {
- Mips::D6, Mips::D7
- };
+ static const uint16_t IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
+ static const uint16_t F32Regs[] = { Mips::F12, Mips::F14 };
// Do not process byval args here.
if (ArgFlags.isByVal())
@@ -2159,14 +2197,28 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
return false;
}
+static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ static const uint16_t F64Regs[] = { Mips::D6, Mips::D7 };
+
+ return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
+}
+
+static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+ static const uint16_t F64Regs[] = { Mips::D12_64, Mips::D14_64 };
+
+ return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
+}
+
#include "MipsGenCallingConv.inc"
//===----------------------------------------------------------------------===//
// Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
-static const unsigned O32IntRegsSize = 4;
-
// Return next O32 integer argument register.
static unsigned getNextIntArgReg(unsigned Reg) {
assert((Reg == Mips::A0) || (Reg == Mips::A2));
@@ -2175,7 +2227,7 @@ static unsigned getNextIntArgReg(unsigned Reg) {
SDValue
MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
- SDValue Chain, SDValue Arg, DebugLoc DL,
+ SDValue Chain, SDValue Arg, SDLoc DL,
bool IsTailCall, SelectionDAG &DAG) const {
if (!IsTailCall) {
SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
@@ -2229,6 +2281,15 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ Mask = MipsRegisterInfo::getMips16RetHelperMask();
+ }
+ }
+ }
Ops.push_back(CLI.DAG.getRegisterMask(Mask));
if (InFlag.getNode())
@@ -2241,10 +2302,10 @@ SDValue
MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
@@ -2254,16 +2315,20 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
+ MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ getSpecialCallingConv(Callee);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo,
+ SpecialCallingConv);
MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
- getTargetMachine().Options.UseSoftFloat,
+ Subtarget->mipsSEUsesSoftFloat(),
Callee.getNode(), CLI.Args);
// Get a count of how many bytes are to be pushed on the stack.
@@ -2286,7 +2351,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
if (!IsTailCall)
- Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
+ Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
SDValue StackPtr = DAG.getCopyFromReg(Chain, DL,
IsN64 ? Mips::SP_64 : Mips::SP,
@@ -2380,32 +2445,40 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
bool GlobalOrExternal = false, InternalLinkage = false;
SDValue CalleeLo;
+ EVT Ty = Callee.getValueType();
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
if (IsPICCall) {
- InternalLinkage = G->getGlobal()->hasInternalLinkage();
+ const GlobalValue *Val = G->getGlobal();
+ InternalLinkage = Val->hasInternalLinkage();
if (InternalLinkage)
- Callee = getAddrLocal(Callee, DAG, HasMips64);
+ Callee = getAddrLocal(G, Ty, DAG, HasMips64);
else if (LargeGOT)
- Callee = getAddrGlobalLargeGOT(Callee, DAG, MipsII::MO_CALL_HI16,
- MipsII::MO_CALL_LO16);
+ Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
+ MipsII::MO_CALL_LO16, Chain,
+ FuncInfo->callPtrInfo(Val));
else
- Callee = getAddrGlobal(Callee, DAG, MipsII::MO_GOT_CALL);
+ Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ FuncInfo->callPtrInfo(Val));
} else
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
MipsII::MO_NO_FLAG);
GlobalOrExternal = true;
}
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const char *Sym = S->getSymbol();
+
if (!IsN64 && !IsPIC) // !N64 && static
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
+ Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
MipsII::MO_NO_FLAG);
else if (LargeGOT)
- Callee = getAddrGlobalLargeGOT(Callee, DAG, MipsII::MO_CALL_HI16,
- MipsII::MO_CALL_LO16);
+ Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
+ MipsII::MO_CALL_LO16, Chain,
+ FuncInfo->callPtrInfo(Sym));
else // N64 || PIC
- Callee = getAddrGlobal(Callee, DAG, MipsII::MO_GOT_CALL);
+ Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
+ FuncInfo->callPtrInfo(Sym));
GlobalOrExternal = true;
}
@@ -2424,7 +2497,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, DL);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
@@ -2439,7 +2512,7 @@ SDValue
MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SDNode *CallNode,
const Type *RetTy) const {
@@ -2447,9 +2520,9 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), RVLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
- MipsCCInfo.analyzeCallResult(Ins, getTargetMachine().Options.UseSoftFloat,
+ MipsCCInfo.analyzeCallResult(Ins, Subtarget->mipsSEUsesSoftFloat(),
CallNode, RetTy);
// Copy all of the result registers out of their specified physreg.
@@ -2478,7 +2551,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv,
bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc DL, SelectionDAG &DAG,
+ SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals)
const {
MachineFunction &MF = DAG.getMachineFunction();
@@ -2494,10 +2567,10 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
Function::const_arg_iterator FuncArg =
DAG.getMachineFunction().getFunction()->arg_begin();
- bool UseSoftFloat = getTargetMachine().Options.UseSoftFloat;
+ bool UseSoftFloat = Subtarget->mipsSEUsesSoftFloat();
MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg);
MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
@@ -2526,21 +2599,9 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Arguments stored on registers
if (IsRegLoc) {
- EVT RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
unsigned ArgReg = VA.getLocReg();
- const TargetRegisterClass *RC;
-
- if (RegVT == MVT::i32)
- RC = Subtarget->inMips16Mode()? &Mips::CPU16RegsRegClass :
- &Mips::CPURegsRegClass;
- else if (RegVT == MVT::i64)
- RC = &Mips::CPU64RegsRegClass;
- else if (RegVT == MVT::f32)
- RC = &Mips::FGR32RegClass;
- else if (RegVT == MVT::f64)
- RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
- else
- llvm_unreachable("RegVT not supported by FormalArguments Lowering");
+ const TargetRegisterClass *RC = getRegClassFor(RegVT);
// Transform the arguments stored on
// physical registers into virtual ones
@@ -2590,9 +2651,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Create load nodes to retrieve arguments from the stack
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(ValVT, DL, Chain, FIN,
- MachinePointerInfo::getFixedStack(FI),
- false, false, false, 0));
+ SDValue Load = DAG.getLoad(ValVT, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0);
+ InVals.push_back(Load);
+ OutChains.push_back(Load.getValue(1));
}
}
@@ -2644,7 +2707,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc DL, SelectionDAG &DAG) const {
+ SDLoc DL, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
SmallVector<CCValAssign, 16> RVLocs;
@@ -2653,10 +2716,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
*DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC MipsCCInfo(CallConv, IsO32, Subtarget->isFP64bit(), CCInfo);
// Analyze return values.
- MipsCCInfo.analyzeReturn(Outs, getTargetMachine().Options.UseSoftFloat,
+ MipsCCInfo.analyzeReturn(Outs, Subtarget->mipsSEUsesSoftFloat(),
MF.getFunction()->getReturnType());
SDValue Flag;
@@ -2715,7 +2778,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MipsTargetLowering::ConstraintType MipsTargetLowering::
getConstraintType(const std::string &Constraint) const
{
- // Mips specific constrainy
+ // Mips specific constraints
// GCC config/mips/constraints.md
//
// 'd' : An address register. Equivalent to r
@@ -2766,16 +2829,19 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
if (type->isIntegerTy())
weight = CW_Register;
break;
- case 'f':
- if (type->isFloatTy())
+ case 'f': // FPU or MSA register
+ if (Subtarget->hasMSA() && type->isVectorTy() &&
+ cast<VectorType>(type)->getBitWidth() == 128)
+ weight = CW_Register;
+ else if (type->isFloatTy())
weight = CW_Register;
break;
case 'c': // $25 for indirect jumps
case 'l': // lo register
case 'x': // hilo register pair
- if (type->isIntegerTy())
+ if (type->isIntegerTy())
weight = CW_SpecificReg;
- break;
+ break;
case 'I': // signed 16 bit immediate
case 'J': // integer zero
case 'K': // unsigned 16 bit immediate
@@ -2793,11 +2859,109 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
return weight;
}
+/// This is a helper function to parse a physical register string and split it
+/// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
+/// that is returned indicates whether parsing was successful. The second flag
+/// is true if the numeric part exists.
+static std::pair<bool, bool>
+parsePhysicalReg(const StringRef &C, std::string &Prefix,
+ unsigned long long &Reg) {
+ if (C.front() != '{' || C.back() != '}')
+ return std::make_pair(false, false);
+
+ // Search for the first numeric character.
+ StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
+ I = std::find_if(B, E, std::ptr_fun(isdigit));
+
+ Prefix.assign(B, I - B);
+
+ // The second flag is set to false if no numeric characters were found.
+ if (I == E)
+ return std::make_pair(true, false);
+
+ // Parse the numeric characters.
+ return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
+ true);
+}
+
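
parsePhysicalReg splits an explicit register constraint such as "{$f20}" or "{$msacsr}" into an alphabetic prefix and an optional trailing number, which parseRegForInlineAsmConstraint below turns into a register class and index. The same split with only the standard library, as a loose stand-in for the StringRef/getAsUnsignedInteger version:

// Illustration only -- standard-library version of the "{<prefix><number>}" split.
#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>
#include <utility>

static std::pair<bool, bool> splitPhysReg(const std::string &C,
                                          std::string &Prefix,
                                          unsigned long long &Reg) {
  if (C.size() < 2 || C.front() != '{' || C.back() != '}')
    return {false, false};
  std::string Body = C.substr(1, C.size() - 2);
  std::size_t I = 0;
  while (I < Body.size() && !std::isdigit(static_cast<unsigned char>(Body[I])))
    ++I;
  Prefix = Body.substr(0, I);
  if (I == Body.size())
    return {true, false};                  // e.g. "{hi}" or "{$msacsr}"
  for (std::size_t J = I; J < Body.size(); ++J)
    if (!std::isdigit(static_cast<unsigned char>(Body[J])))
      return {false, true};                // junk after the number
  Reg = std::stoull(Body.substr(I));
  return {true, true};
}

int main() {
  std::string P;
  unsigned long long R = 0;
  assert(splitPhysReg("{$f20}", P, R) == std::make_pair(true, true));
  assert(P == "$f" && R == 20);
  return 0;
}
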
+std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
+parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const TargetRegisterClass *RC;
+ std::string Prefix;
+ unsigned long long Reg;
+
+ std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
+
+ if (!R.first)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
+ // No numeric characters follow "hi" or "lo".
+ if (R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ RC = TRI->getRegClass(Prefix == "hi" ?
+ Mips::HI32RegClassID : Mips::LO32RegClassID);
+ return std::make_pair(*(RC->begin()), RC);
+ } else if (Prefix.compare(0, 4, "$msa") == 0) {
+ // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
+
+ // No numeric characters follow the name.
+ if (R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+
+ Reg = StringSwitch<unsigned long long>(Prefix)
+ .Case("$msair", Mips::MSAIR)
+ .Case("$msacsr", Mips::MSACSR)
+ .Case("$msaaccess", Mips::MSAAccess)
+ .Case("$msasave", Mips::MSASave)
+ .Case("$msamodify", Mips::MSAModify)
+ .Case("$msarequest", Mips::MSARequest)
+ .Case("$msamap", Mips::MSAMap)
+ .Case("$msaunmap", Mips::MSAUnmap)
+ .Default(0);
+
+ if (!Reg)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass *)0);
+
+ RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
+ return std::make_pair(Reg, RC);
+ }
+
+ if (!R.second)
+ return std::make_pair((unsigned)0, (const TargetRegisterClass*)0);
+
+ if (Prefix == "$f") { // Parse $f0-$f31.
+ // If the size of FP registers is 64-bit or Reg is an even number, select
+ // the 64-bit register class. Otherwise, select the 32-bit register class.
+ if (VT == MVT::Other)
+ VT = (Subtarget->isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
+
+ RC = getRegClassFor(VT);
+
+ if (RC == &Mips::AFGR64RegClass) {
+ assert(Reg % 2 == 0);
+ Reg >>= 1;
+ }
+ } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
+ RC = TRI->getRegClass(Mips::FCCRegClassID);
+ else if (Prefix == "$w") { // Parse $w0-$w31.
+ RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
+ } else { // Parse $0-$31.
+ assert(Prefix == "$");
+ RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
+ }
+
+ assert(Reg < RC->getNumRegs());
+ return std::make_pair(*(RC->begin() + Reg), RC);
+}
+
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
+getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
{
if (Constraint.size() == 1) {
switch (Constraint[0]) {
@@ -2807,18 +2971,26 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
if (Subtarget->inMips16Mode())
return std::make_pair(0U, &Mips::CPU16RegsRegClass);
- return std::make_pair(0U, &Mips::CPURegsRegClass);
+ return std::make_pair(0U, &Mips::GPR32RegClass);
}
if (VT == MVT::i64 && !HasMips64)
- return std::make_pair(0U, &Mips::CPURegsRegClass);
+ return std::make_pair(0U, &Mips::GPR32RegClass);
if (VT == MVT::i64 && HasMips64)
- return std::make_pair(0U, &Mips::CPU64RegsRegClass);
+ return std::make_pair(0U, &Mips::GPR64RegClass);
// This will generate an error message
return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
- case 'f':
- if (VT == MVT::f32)
+ case 'f': // FPU or MSA register
+ if (VT == MVT::v16i8)
+ return std::make_pair(0U, &Mips::MSA128BRegClass);
+ else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+ return std::make_pair(0U, &Mips::MSA128HRegClass);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return std::make_pair(0U, &Mips::MSA128WRegClass);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return std::make_pair(0U, &Mips::MSA128DRegClass);
+ else if (VT == MVT::f32)
return std::make_pair(0U, &Mips::FGR32RegClass);
- if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
+ else if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
if (Subtarget->isFP64bit())
return std::make_pair(0U, &Mips::FGR64RegClass);
return std::make_pair(0U, &Mips::AFGR64RegClass);
@@ -2826,19 +2998,26 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
break;
case 'c': // register suitable for indirect jump
if (VT == MVT::i32)
- return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass);
+ return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
assert(VT == MVT::i64 && "Unexpected type.");
- return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass);
+ return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
case 'l': // register suitable for indirect jump
if (VT == MVT::i32)
- return std::make_pair((unsigned)Mips::LO, &Mips::LORegsRegClass);
- return std::make_pair((unsigned)Mips::LO64, &Mips::LORegs64RegClass);
+ return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
+ return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
case 'x': // register suitable for indirect jump
// Fixme: Not triggering the use of both hi and low
// This will generate an error message
return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
}
}
+
+ std::pair<unsigned, const TargetRegisterClass *> R;
+ R = parseRegForInlineAsmConstraint(Constraint, VT);
+
+ if (R.second)
+ return R;
+
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
@@ -2937,8 +3116,8 @@ void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
-bool
-MipsTargetLowering::isLegalAddressingMode(const AddrMode &AM, Type *Ty) const {
+bool MipsTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
// No global is ever allowed as a base.
if (AM.BaseGV)
return false;
@@ -3002,13 +3181,13 @@ static bool isF128SoftLibCall(const char *CallSym) {
"log10l", "log2l", "logl", "nearbyintl", "powl", "rintl", "sinl", "sqrtl",
"truncl"};
- const char * const *End = LibCalls + array_lengthof(LibCalls);
+ const char *const *End = LibCalls + array_lengthof(LibCalls);
// Check that LibCalls is sorted alphabetically.
MipsTargetLowering::LTStr Comp;
#ifndef NDEBUG
- for (const char * const *I = LibCalls; I < End - 1; ++I)
+ for (const char *const *I = LibCalls; I < End - 1; ++I)
assert(Comp(*I, *(I + 1)));
#endif
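
The NDEBUG block checks that LibCalls stays alphabetically sorted, so that the f128 soft-float helper name lookup can be done by binary search over plain C strings. A minimal sketch of that kind of lookup (the three-entry table here is a made-up sample, not the real list):

// Illustration only -- binary search over a sorted table of C strings.
#include <algorithm>
#include <cassert>
#include <cstring>

struct LTStr { // same shape as the LTStr comparator used above
  bool operator()(const char *L, const char *R) const {
    return std::strcmp(L, R) < 0;
  }
};

int main() {
  static const char *const LibCalls[] = {"__addtf3", "__divtf3", "__multf3"};
  const char *const *End = LibCalls + 3;
  assert(std::binary_search(LibCalls, End, "__divtf3", LTStr()));
  assert(!std::binary_search(LibCalls, End, "truncl", LTStr()));
  return 0;
}
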
@@ -3029,13 +3208,32 @@ static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
}
-MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CC, bool IsO32_,
- CCState &Info)
- : CCInfo(Info), CallConv(CC), IsO32(IsO32_) {
+MipsTargetLowering::MipsCC::SpecialCallingConvType
+ MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ MipsCC::NoSpecialCallingConv;
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ SpecialCallingConv = MipsCC::Mips16RetHelperConv;
+ }
+ }
+ }
+ return SpecialCallingConv;
+}
+
+MipsTargetLowering::MipsCC::MipsCC(
+ CallingConv::ID CC, bool IsO32_, bool IsFP64_, CCState &Info,
+ MipsCC::SpecialCallingConvType SpecialCallingConv_)
+ : CCInfo(Info), CallConv(CC), IsO32(IsO32_), IsFP64(IsFP64_),
+ SpecialCallingConv(SpecialCallingConv_){
// Pre-allocate reserved argument area.
CCInfo.AllocateStack(reservedArgArea(), 1);
}
+
void MipsTargetLowering::MipsCC::
analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode,
@@ -3143,11 +3341,10 @@ analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsSoftFloat,
analyzeReturn(Outs, IsSoftFloat, 0, RetTy);
}
-void
-MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
- MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags) {
+void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
+ MVT LocVT,
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags) {
assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0.");
struct ByValArgInfo ByVal;
@@ -3183,11 +3380,13 @@ llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const {
if (CallConv == CallingConv::Fast)
return CC_Mips_FastCC;
- return IsO32 ? CC_MipsO32 : CC_MipsN;
+ if (SpecialCallingConv == Mips16RetHelperConv)
+ return CC_Mips16RetHelper;
+ return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN;
}
llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const {
- return IsO32 ? CC_MipsO32 : CC_MipsN_VarArg;
+ return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg;
}
const uint16_t *MipsTargetLowering::MipsCC::shadowRegs() const {
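
A minimal standalone model of the selection order introduced above: the fast calling convention wins first, then the Mips16 return-helper convention, then the O32 FP32/FP64 or N-ABI assigner. The string return values are only stand-ins for the real llvm::CCAssignFn pointers:

#include <cstdio>

enum SpecialCallingConvType { Mips16RetHelperConv, NoSpecialCallingConv };

// Stand-in for llvm::CCAssignFn*: returns the name of the assigner that would be chosen.
static const char *pickFixedArgFn(bool IsFastCC, SpecialCallingConvType Special,
                                  bool IsO32, bool IsFP64) {
  if (IsFastCC)
    return "CC_Mips_FastCC";
  if (Special == Mips16RetHelperConv)
    return "CC_Mips16RetHelper";
  return IsO32 ? (IsFP64 ? "CC_MipsO32_FP64" : "CC_MipsO32_FP32") : "CC_MipsN";
}

int main() {
  // O32 ABI, 32-bit FPU registers, no special convention.
  std::printf("%s\n", pickFixedArgFn(false, NoSpecialCallingConv, true, false));
  return 0;
}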
@@ -3233,7 +3432,7 @@ MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy,
}
void MipsTargetLowering::
-copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains,
+copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
const MipsCC &CC, const ByValArgInfo &ByVal) const {
@@ -3277,9 +3476,9 @@ copyByValRegs(SDValue Chain, DebugLoc DL, std::vector<SDValue> &OutChains,
// Copy byVal arg to registers and stack.
void MipsTargetLowering::
-passByValArg(SDValue Chain, DebugLoc DL,
+passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const {
@@ -3365,17 +3564,15 @@ passByValArg(SDValue Chain, DebugLoc DL,
DAG.getConstant(Offset, PtrTy));
SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
DAG.getIntPtrConstant(ByVal.Address));
- Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
- DAG.getConstant(MemCpySize, PtrTy), Alignment,
- /*isVolatile=*/false, /*AlwaysInline=*/false,
+ Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
+ Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
MachinePointerInfo(0), MachinePointerInfo(0));
MemOpChains.push_back(Chain);
}
-void
-MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
- const MipsCC &CC, SDValue Chain,
- DebugLoc DL, SelectionDAG &DAG) const {
+void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
+ const MipsCC &CC, SDValue Chain,
+ SDLoc DL, SelectionDAG &DAG) const {
unsigned NumRegs = CC.numIntArgRegs();
const uint16_t *ArgRegs = CC.intArgRegs();
const CCState &CCInfo = CC.getCCInfo();
@@ -3393,8 +3590,7 @@ MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
if (NumRegs == Idx)
VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize);
else
- VaArgOffset =
- (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
+ VaArgOffset = (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
// Record the frame index of the first variable argument
// which is a value necessary to VASTART.
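
The VaArgOffset computation reformatted above is easy to sanity-check in isolation. The helper below re-implements the same arithmetic with invented names and O32-like constants (four 4-byte integer argument registers, a 16-byte reserved argument area); it is an illustration, not code from the patch:

#include <cassert>
#include <cstdio>

static int roundUpToAlignment(int Value, int Align) {
  return (Value + Align - 1) / Align * Align;
}

// Mirrors the logic above: if all NumRegs argument registers were used, the
// first variable argument lives past the aligned outgoing-argument area;
// otherwise it lives inside the reserved argument area, RegSize bytes per
// unallocated register below its end.
static int firstVarArgOffset(int NumRegs, int Idx, int RegSize,
                             int ReservedArgArea, int NextStackOffset) {
  if (NumRegs == Idx)
    return roundUpToAlignment(NextStackOffset, RegSize);
  return ReservedArgArea - RegSize * (NumRegs - Idx);
}

int main() {
  assert(firstVarArgOffset(4, 2, 4, 16, 0) == 8);   // two registers still unallocated
  assert(firstVarArgOffset(4, 4, 4, 16, 20) == 20); // all registers consumed
  std::printf("ok\n");
  return 0;
}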
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 5587e8f58147..65f68f04315d 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -17,6 +17,7 @@
#include "Mips.h"
#include "MipsSubtarget.h"
+#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
@@ -60,8 +61,8 @@ namespace llvm {
CMovFP_T,
CMovFP_F,
- // Floating Point Rounding
- FPRound,
+ // FP-to-int truncation node.
+ TruncIntFP,
// Return
Ret,
@@ -69,10 +70,11 @@ namespace llvm {
EH_RETURN,
// Node used to extract integer from accumulator.
- ExtractLOHI,
+ MFHI,
+ MFLO,
// Node used to insert integers to accumulator.
- InsertLOHI,
+ MTLOHI,
// Mult nodes.
Mult,
@@ -152,6 +154,43 @@ namespace llvm {
SETCC_DSP,
SELECT_CC_DSP,
+ // Vector comparisons.
+ // These take a vector and return a boolean.
+ VALL_ZERO,
+ VANY_ZERO,
+ VALL_NONZERO,
+ VANY_NONZERO,
+
+ // These take a vector and return a vector bitmask.
+ VCEQ,
+ VCLE_S,
+ VCLE_U,
+ VCLT_S,
+ VCLT_U,
+
+ // Element-wise vector max/min.
+ VSMAX,
+ VSMIN,
+ VUMAX,
+ VUMIN,
+
+ // Vector Shuffle with mask as an operand
+ VSHF, // Generic shuffle
+ SHF, // 4-element set shuffle.
+ ILVEV, // Interleave even elements
+ ILVOD, // Interleave odd elements
+ ILVL, // Interleave left elements
+ ILVR, // Interleave right elements
+ PCKEV, // Pack even elements
+ PCKOD, // Pack odd elements
+
+ // Combined (XOR (OR $a, $b), -1)
+ VNOR,
+
+ // Extended vector element extraction
+ VEXTRACT_SEXT_ELT,
+ VEXTRACT_ZEXT_ELT,
+
// Load/Store Left/Right nodes.
LWL = ISD::FIRST_TARGET_MEMORY_OPCODE,
LWR,
@@ -195,7 +234,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - get the ISD::SETCC result ValueType
- EVT getSetCCResultType(EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -211,12 +250,72 @@ namespace llvm {
protected:
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const;
- SDValue getAddrLocal(SDValue Op, SelectionDAG &DAG, bool HasMips64) const;
-
- SDValue getAddrGlobal(SDValue Op, SelectionDAG &DAG, unsigned Flag) const;
-
- SDValue getAddrGlobalLargeGOT(SDValue Op, SelectionDAG &DAG,
- unsigned HiFlag, unsigned LoFlag) const;
+ // This method creates the following nodes, which are necessary for
+ // computing a local symbol's address:
+ //
+ // (add (load (wrapper $gp, %got(sym)), %lo(sym))
+ template<class NodeTy>
+ SDValue getAddrLocal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ bool HasMips64) const {
+ SDLoc DL(N);
+ unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
+ SDValue GOT = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
+ getTargetNode(N, Ty, DAG, GOTFlag));
+ SDValue Load = DAG.getLoad(Ty, DL, DAG.getEntryNode(), GOT,
+ MachinePointerInfo::getGOT(), false, false,
+ false, 0);
+ unsigned LoFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
+ SDValue Lo = DAG.getNode(MipsISD::Lo, DL, Ty,
+ getTargetNode(N, Ty, DAG, LoFlag));
+ return DAG.getNode(ISD::ADD, DL, Ty, Load, Lo);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a global symbol's address:
+ //
+ // (load (wrapper $gp, %got(sym)))
+ template<class NodeTy>
+ SDValue getAddrGlobal(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag, SDValue Chain,
+ const MachinePointerInfo &PtrInfo) const {
+ SDLoc DL(N);
+ SDValue Tgt = DAG.getNode(MipsISD::Wrapper, DL, Ty, getGlobalReg(DAG, Ty),
+ getTargetNode(N, Ty, DAG, Flag));
+ return DAG.getLoad(Ty, DL, Chain, Tgt, PtrInfo, false, false, false, 0);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a global symbol's address in large-GOT mode:
+ //
+ // (load (wrapper (add %hi(sym), $gp), %lo(sym)))
+ template<class NodeTy>
+ SDValue getAddrGlobalLargeGOT(NodeTy *N, EVT Ty, SelectionDAG &DAG,
+ unsigned HiFlag, unsigned LoFlag,
+ SDValue Chain,
+ const MachinePointerInfo &PtrInfo) const {
+ SDLoc DL(N);
+ SDValue Hi = DAG.getNode(MipsISD::Hi, DL, Ty,
+ getTargetNode(N, Ty, DAG, HiFlag));
+ Hi = DAG.getNode(ISD::ADD, DL, Ty, Hi, getGlobalReg(DAG, Ty));
+ SDValue Wrapper = DAG.getNode(MipsISD::Wrapper, DL, Ty, Hi,
+ getTargetNode(N, Ty, DAG, LoFlag));
+ return DAG.getLoad(Ty, DL, Chain, Wrapper, PtrInfo, false, false, false,
+ 0);
+ }
+
+ // This method creates the following nodes, which are necessary for
+ // computing a symbol's address in non-PIC mode:
+ //
+ // (add %hi(sym), %lo(sym))
+ template<class NodeTy>
+ SDValue getAddrNonPIC(NodeTy *N, EVT Ty, SelectionDAG &DAG) const {
+ SDLoc DL(N);
+ SDValue Hi = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_HI);
+ SDValue Lo = getTargetNode(N, Ty, DAG, MipsII::MO_ABS_LO);
+ return DAG.getNode(ISD::ADD, DL, Ty,
+ DAG.getNode(MipsISD::Hi, DL, Ty, Hi),
+ DAG.getNode(MipsISD::Lo, DL, Ty, Lo));
+ }
/// This function fills Ops, which is the list of operands that will later
/// be used when a function call node is created. It also generates
@@ -240,7 +339,13 @@ namespace llvm {
/// arguments and inquire about calling convention information.
class MipsCC {
public:
- MipsCC(CallingConv::ID CallConv, bool IsO32, CCState &Info);
+ enum SpecialCallingConvType {
+ Mips16RetHelperConv, NoSpecialCallingConv
+ };
+
+ MipsCC(CallingConv::ID CallConv, bool IsO32, bool IsFP64, CCState &Info,
+ SpecialCallingConvType SpecialCallingConv = NoSpecialCallingConv);
+
void analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
bool IsVarArg, bool IsSoftFloat,
@@ -275,7 +380,7 @@ namespace llvm {
/// Return pointer to array of integer argument registers.
const uint16_t *intArgRegs() const;
- typedef SmallVector<ByValArgInfo, 2>::const_iterator byval_iterator;
+ typedef SmallVectorImpl<ByValArgInfo>::const_iterator byval_iterator;
byval_iterator byval_begin() const { return ByValArgs.begin(); }
byval_iterator byval_end() const { return ByValArgs.end(); }
@@ -312,9 +417,13 @@ namespace llvm {
CCState &CCInfo;
CallingConv::ID CallConv;
- bool IsO32;
+ bool IsO32, IsFP64;
+ SpecialCallingConvType SpecialCallingConv;
SmallVector<ByValArgInfo, 2> ByValArgs;
};
+ protected:
+ SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
// Subtarget Info
const MipsSubtarget *Subtarget;
@@ -322,11 +431,32 @@ namespace llvm {
bool HasMips64, IsN64, IsO32;
private:
+ // Create a TargetGlobalAddress node.
+ SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetExternalSymbol node.
+ SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetBlockAddress node.
+ SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetJumpTable node.
+ SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ // Create a TargetConstantPool node.
+ SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
+ unsigned Flag) const;
+
+ MipsCC::SpecialCallingConvType getSpecialCallingConv(SDValue Callee) const;
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
const SDNode *CallNode, const Type *RetTy) const;
@@ -351,9 +481,8 @@ namespace llvm {
SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG& DAG) const;
SDValue lowerShiftRightParts(SDValue Op, SelectionDAG& DAG,
bool IsSRA) const;
- SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerADD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
@@ -365,7 +494,7 @@ namespace llvm {
/// copyByValArg - Copy argument registers which were used to pass a byval
/// argument to the stack. Create a stack frame object for the byval
/// argument.
- void copyByValRegs(SDValue Chain, DebugLoc DL,
+ void copyByValRegs(SDValue Chain, SDLoc DL,
std::vector<SDValue> &OutChains, SelectionDAG &DAG,
const ISD::ArgFlagsTy &Flags,
SmallVectorImpl<SDValue> &InVals,
@@ -373,9 +502,9 @@ namespace llvm {
const MipsCC &CC, const ByValArgInfo &ByVal) const;
/// passByValArg - Pass a byval argument in registers or on stack.
- void passByValArg(SDValue Chain, DebugLoc DL,
+ void passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const;
@@ -384,17 +513,17 @@ namespace llvm {
/// to the stack. Also create a stack frame object for the first variable
/// argument.
void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC,
- SDValue Chain, DebugLoc DL, SelectionDAG &DAG) const;
+ SDValue Chain, SDLoc DL, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain,
- SDValue Arg, DebugLoc DL, bool IsTailCall,
+ SDValue Arg, SDLoc DL, bool IsTailCall,
SelectionDAG &DAG) const;
virtual SDValue
@@ -412,7 +541,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const;
+ SDLoc dl, SelectionDAG &DAG) const;
// Inline asm support
ConstraintType getConstraintType(const std::string &Constraint) const;
@@ -422,9 +551,14 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
+ /// This function parses registers that appear in inline-asm constraints.
+ /// It returns pair (0, 0) on failure.
+ std::pair<unsigned, const TargetRegisterClass *>
+ parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const;
+
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
+ MVT VT) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
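
The templated getAddr* helpers added to this header replace the old non-template overloads. As a rough illustration of how a lowering routine in MipsISelLowering.cpp might dispatch between them: the function name below is invented, a matching declaration would be needed in the class, and the relocation-flag choices are illustrative rather than taken from the patch.

SDValue MipsTargetLowering::lowerGlobalAddressSketch(SDValue Op,
                                                     SelectionDAG &DAG) const {
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  EVT Ty = Op.getValueType();

  // Static relocation model: (add %hi(sym), %lo(sym)).
  if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
    return getAddrNonPIC(N, Ty, DAG);

  // Symbols with internal linkage: GOT page/offset (or got/lo) form.
  if (N->getGlobal()->hasInternalLinkage())
    return getAddrLocal(N, Ty, DAG, HasMips64);

  // Everything else: a single load from the GOT; the flag depends on the ABI.
  return getAddrGlobal(N, Ty, DAG,
                       HasMips64 ? MipsII::MO_GOT_DISP : MipsII::MO_GOT16,
                       DAG.getEntryNode(), MachinePointerInfo::getGOT());
}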
diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td
index 6b23057c9cdb..9f7ce9aa72b0 100644
--- a/lib/Target/Mips/MipsInstrFPU.td
+++ b/lib/Target/Mips/MipsInstrFPU.td
@@ -24,12 +24,14 @@
//===----------------------------------------------------------------------===//
// Floating Point Compare and Branch
-def SDT_MipsFPBrcond : SDTypeProfile<0, 2, [SDTCisInt<0>,
- SDTCisVT<1, OtherVT>]>;
+def SDT_MipsFPBrcond : SDTypeProfile<0, 3, [SDTCisInt<0>,
+ SDTCisVT<1, i32>,
+ SDTCisVT<2, OtherVT>]>;
def SDT_MipsFPCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, SDTCisFP<1>,
SDTCisVT<2, i32>]>;
-def SDT_MipsCMovFP : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
- SDTCisSameAs<1, 2>]>;
+def SDT_MipsCMovFP : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVT<2, i32>,
+ SDTCisSameAs<1, 3>]>;
+def SDT_MipsTruncIntFP : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_MipsBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
SDTCisVT<1, i32>,
SDTCisSameAs<1, 2>]>;
@@ -42,6 +44,7 @@ def MipsCMovFP_T : SDNode<"MipsISD::CMovFP_T", SDT_MipsCMovFP, [SDNPInGlue]>;
def MipsCMovFP_F : SDNode<"MipsISD::CMovFP_F", SDT_MipsCMovFP, [SDNPInGlue]>;
def MipsFPBrcond : SDNode<"MipsISD::FPBrcond", SDT_MipsFPBrcond,
[SDNPHasChain, SDNPOptInGlue]>;
+def MipsTruncIntFP : SDNode<"MipsISD::TruncIntFP", SDT_MipsTruncIntFP>;
def MipsBuildPairF64 : SDNode<"MipsISD::BuildPairF64", SDT_MipsBuildPairF64>;
def MipsExtractElementF64 : SDNode<"MipsISD::ExtractElementF64",
SDT_MipsExtractElementF64>;
@@ -86,7 +89,7 @@ def fpimm0neg : PatLeaf<(fpimm), [{
// Only S32 and D32 are supported right now.
//===----------------------------------------------------------------------===//
-class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm,
+class ADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fs, $ft"),
@@ -96,15 +99,15 @@ class ADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin, bit IsComm,
multiclass ADDS_M<string opstr, InstrItinClass Itin, bit IsComm,
SDPatternOperator OpNode = null_frag> {
- def _D32 : ADDS_FT<opstr, AFGR64, Itin, IsComm, OpNode>,
+ def _D32 : ADDS_FT<opstr, AFGR64Opnd, Itin, IsComm, OpNode>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ADDS_FT<opstr, FGR64, Itin, IsComm, OpNode>,
+ def _D64 : ADDS_FT<opstr, FGR64Opnd, Itin, IsComm, OpNode>,
Requires<[IsFP64bit, HasStdEnc]> {
string DecoderNamespace = "Mips64";
}
}
-class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class ABSS_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fd), (ins SrcRC:$fs), !strconcat(opstr, "\t$fd, $fs"),
[(set DstRC:$fd, (OpNode SrcRC:$fs))], Itin, FrmFR>,
@@ -112,95 +115,87 @@ class ABSS_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
multiclass ABSS_M<string opstr, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> {
- def _D32 : ABSS_FT<opstr, AFGR64, AFGR64, Itin, OpNode>,
+ def _D32 : ABSS_FT<opstr, AFGR64Opnd, AFGR64Opnd, Itin, OpNode>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ABSS_FT<opstr, FGR64, FGR64, Itin, OpNode>,
+ def _D64 : ABSS_FT<opstr, FGR64Opnd, FGR64Opnd, Itin, OpNode>,
Requires<[IsFP64bit, HasStdEnc]> {
string DecoderNamespace = "Mips64";
}
}
multiclass ROUND_M<string opstr, InstrItinClass Itin> {
- def _D32 : ABSS_FT<opstr, FGR32, AFGR64, Itin>,
+ def _D32 : ABSS_FT<opstr, FGR32Opnd, AFGR64Opnd, Itin>,
Requires<[NotFP64bit, HasStdEnc]>;
- def _D64 : ABSS_FT<opstr, FGR32, FGR64, Itin>,
+ def _D64 : ABSS_FT<opstr, FGR32Opnd, FGR64Opnd, Itin>,
Requires<[IsFP64bit, HasStdEnc]> {
let DecoderNamespace = "Mips64";
}
}
-class MFC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class MFC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"),
[(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>;
-class MTC1_FT<string opstr, RegisterClass DstRC, RegisterClass SrcRC,
+class MTC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"),
[(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
-class MFC1_FT_CCR<string opstr, RegisterClass DstRC, RegisterOperand SrcRC,
- InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs DstRC:$rt), (ins SrcRC:$fs), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$rt, (OpNode SrcRC:$fs))], Itin, FrmFR>;
-
-class MTC1_FT_CCR<string opstr, RegisterOperand DstRC, RegisterClass SrcRC,
- InstrItinClass Itin, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs DstRC:$fs), (ins SrcRC:$rt), !strconcat(opstr, "\t$rt, $fs"),
- [(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
-
-class LW_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
- Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
+ SDPatternOperator OpNode= null_frag> :
+ InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
+ let mayLoad = 1;
}
-class SW_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
- Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
+ SDPatternOperator OpNode= null_frag> :
+ InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
+ let mayStore = 1;
}
-class MADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
[(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))], Itin, FrmFR>;
-class NMADDS_FT<string opstr, RegisterClass RC, InstrItinClass Itin,
+class NMADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
!strconcat(opstr, "\t$fd, $fr, $fs, $ft"),
[(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))],
Itin, FrmFR>;
-class LWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC,
+class LWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
- InstSE<(outs DRC:$fd), (ins PRC:$base, PRC:$index),
+ InstSE<(outs DRC:$fd), (ins PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fd, ${index}(${base})"),
- [(set DRC:$fd, (OpNode (add PRC:$base, PRC:$index)))], Itin, FrmFI> {
+ [(set DRC:$fd, (OpNode (add iPTR:$base, iPTR:$index)))], Itin, FrmFI> {
let AddedComplexity = 20;
}
-class SWXC1_FT<string opstr, RegisterClass DRC, RegisterClass PRC,
+class SWXC1_FT<string opstr, RegisterOperand DRC,
InstrItinClass Itin, SDPatternOperator OpNode = null_frag> :
- InstSE<(outs), (ins DRC:$fs, PRC:$base, PRC:$index),
+ InstSE<(outs), (ins DRC:$fs, PtrRC:$base, PtrRC:$index),
!strconcat(opstr, "\t$fs, ${index}(${base})"),
- [(OpNode DRC:$fs, (add PRC:$base, PRC:$index))], Itin, FrmFI> {
+ [(OpNode DRC:$fs, (add iPTR:$base, iPTR:$index))], Itin, FrmFI> {
let AddedComplexity = 20;
}
class BC1F_FT<string opstr, InstrItinClass Itin,
SDPatternOperator Op = null_frag> :
- InstSE<(outs), (ins brtarget:$offset), !strconcat(opstr, "\t$offset"),
- [(MipsFPBrcond Op, bb:$offset)], Itin, FrmFI> {
+ InstSE<(outs), (ins FCCRegsOpnd:$fcc, brtarget:$offset),
+ !strconcat(opstr, "\t$fcc, $offset"),
+ [(MipsFPBrcond Op, FCCRegsOpnd:$fcc, bb:$offset)], Itin, FrmFI> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
let Defs = [AT];
- let Uses = [FCR31];
}
class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin,
@@ -208,17 +203,53 @@ class CEQS_FT<string typestr, RegisterClass RC, InstrItinClass Itin,
InstSE<(outs), (ins RC:$fs, RC:$ft, condcode:$cond),
!strconcat("c.$cond.", typestr, "\t$fs, $ft"),
[(OpNode RC:$fs, RC:$ft, imm:$cond)], Itin, FrmFR> {
- let Defs = [FCR31];
-}
+ let Defs = [FCC0];
+ let isCodeGenOnly = 1;
+}
+
+class C_COND_FT<string CondStr, string Typestr, RegisterOperand RC> :
+ InstSE<(outs), (ins RC:$fs, RC:$ft),
+ !strconcat("c.", CondStr, ".", Typestr, "\t$fs, $ft"), [], IIFcmp,
+ FrmFR>;
+
+multiclass C_COND_M<string TypeStr, RegisterOperand RC, bits<5> fmt> {
+ def C_F_#NAME : C_COND_FT<"f", TypeStr, RC>, C_COND_FM<fmt, 0>;
+ def C_UN_#NAME : C_COND_FT<"un", TypeStr, RC>, C_COND_FM<fmt, 1>;
+ def C_EQ_#NAME : C_COND_FT<"eq", TypeStr, RC>, C_COND_FM<fmt, 2>;
+ def C_UEQ_#NAME : C_COND_FT<"ueq", TypeStr, RC>, C_COND_FM<fmt, 3>;
+ def C_OLT_#NAME : C_COND_FT<"olt", TypeStr, RC>, C_COND_FM<fmt, 4>;
+ def C_ULT_#NAME : C_COND_FT<"ult", TypeStr, RC>, C_COND_FM<fmt, 5>;
+ def C_OLE_#NAME : C_COND_FT<"ole", TypeStr, RC>, C_COND_FM<fmt, 6>;
+ def C_ULE_#NAME : C_COND_FT<"ule", TypeStr, RC>, C_COND_FM<fmt, 7>;
+ def C_SF_#NAME : C_COND_FT<"sf", TypeStr, RC>, C_COND_FM<fmt, 8>;
+ def C_NGLE_#NAME : C_COND_FT<"ngle", TypeStr, RC>, C_COND_FM<fmt, 9>;
+ def C_SEQ_#NAME : C_COND_FT<"seq", TypeStr, RC>, C_COND_FM<fmt, 10>;
+ def C_NGL_#NAME : C_COND_FT<"ngl", TypeStr, RC>, C_COND_FM<fmt, 11>;
+ def C_LT_#NAME : C_COND_FT<"lt", TypeStr, RC>, C_COND_FM<fmt, 12>;
+ def C_NGE_#NAME : C_COND_FT<"nge", TypeStr, RC>, C_COND_FM<fmt, 13>;
+ def C_LE_#NAME : C_COND_FT<"le", TypeStr, RC>, C_COND_FM<fmt, 14>;
+ def C_NGT_#NAME : C_COND_FT<"ngt", TypeStr, RC>, C_COND_FM<fmt, 15>;
+}
+
+defm S : C_COND_M<"s", FGR32Opnd, 16>;
+defm D32 : C_COND_M<"d", AFGR64Opnd, 17>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+let DecoderNamespace = "Mips64" in
+defm D64 : C_COND_M<"d", FGR64Opnd, 17>, Requires<[IsFP64bit, HasStdEnc]>;
//===----------------------------------------------------------------------===//
// Floating Point Instructions
//===----------------------------------------------------------------------===//
-def ROUND_W_S : ABSS_FT<"round.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xc, 16>;
-def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xd, 16>;
-def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xe, 16>;
-def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0xf, 16>;
-def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32, FGR32, IIFcvt>, ABSS_FM<0x24, 16>;
+def ROUND_W_S : ABSS_FT<"round.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xc, 16>;
+def TRUNC_W_S : ABSS_FT<"trunc.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xd, 16>;
+def CEIL_W_S : ABSS_FT<"ceil.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xe, 16>;
+def FLOOR_W_S : ABSS_FT<"floor.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xf, 16>;
+def CVT_W_S : ABSS_FT<"cvt.w.s", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x24, 16>;
defm ROUND_W : ROUND_M<"round.w.d", IIFcvt>, ABSS_FM<0xc, 17>;
defm TRUNC_W : ROUND_M<"trunc.w.d", IIFcvt>, ABSS_FM<0xd, 17>;
@@ -227,46 +258,72 @@ defm FLOOR_W : ROUND_M<"floor.w.d", IIFcvt>, ABSS_FM<0xf, 17>;
defm CVT_W : ROUND_M<"cvt.w.d", IIFcvt>, ABSS_FM<0x24, 17>;
let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def ROUND_L_S : ABSS_FT<"round.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x8, 16>;
- def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64, FGR64, IIFcvt>,
+ def ROUND_L_S : ABSS_FT<"round.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x8, 16>;
+ def ROUND_L_D64 : ABSS_FT<"round.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0x8, 17>;
- def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x9, 16>;
- def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64, FGR64, IIFcvt>,
+ def TRUNC_L_S : ABSS_FT<"trunc.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x9, 16>;
+ def TRUNC_L_D64 : ABSS_FT<"trunc.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0x9, 17>;
- def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xa, 16>;
- def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0xa, 17>;
- def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0xb, 16>;
- def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64, FGR64, IIFcvt>,
+ def CEIL_L_S : ABSS_FT<"ceil.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xa, 16>;
+ def CEIL_L_D64 : ABSS_FT<"ceil.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0xa, 17>;
+ def FLOOR_L_S : ABSS_FT<"floor.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0xb, 16>;
+ def FLOOR_L_D64 : ABSS_FT<"floor.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
ABSS_FM<0xb, 17>;
}
-def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32, FGR32, IIFcvt>, ABSS_FM<0x20, 20>;
-def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x25, 16>;
-def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64, FGR64, IIFcvt>, ABSS_FM<0x25, 17>;
+def CVT_S_W : ABSS_FT<"cvt.s.w", FGR32Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x20, 20>;
+def CVT_L_S : ABSS_FT<"cvt.l.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x25, 16>;
+def CVT_L_D64: ABSS_FT<"cvt.l.d", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x25, 17>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32, AFGR64, IIFcvt>, ABSS_FM<0x20, 17>;
- def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>;
- def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>;
+ def CVT_S_D32 : ABSS_FT<"cvt.s.d", FGR32Opnd, AFGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 17>;
+ def CVT_D32_W : ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 20>;
+ def CVT_D32_S : ABSS_FT<"cvt.d.s", AFGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 16>;
}
let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
- def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 17>;
- def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32, FGR64, IIFcvt>, ABSS_FM<0x20, 21>;
- def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 20>;
- def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64, FGR32, IIFcvt>, ABSS_FM<0x21, 16>;
- def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64, FGR64, IIFcvt>, ABSS_FM<0x21, 21>;
+ def CVT_S_D64 : ABSS_FT<"cvt.s.d", FGR32Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 17>;
+ def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x20, 21>;
+ def CVT_D64_W : ABSS_FT<"cvt.d.w", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 20>;
+ def CVT_D64_S : ABSS_FT<"cvt.d.s", FGR64Opnd, FGR32Opnd, IIFcvt>,
+ ABSS_FM<0x21, 16>;
+ def CVT_D64_L : ABSS_FT<"cvt.d.l", FGR64Opnd, FGR64Opnd, IIFcvt>,
+ ABSS_FM<0x21, 21>;
+}
+
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def PseudoCVT_S_W : ABSS_FT<"", FGR32Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_D32_W : ABSS_FT<"", AFGR64Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_S_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
+ def PseudoCVT_D64_W : ABSS_FT<"", FGR64Opnd, GPR32Opnd, IIFcvt>;
+ def PseudoCVT_D64_L : ABSS_FT<"", FGR64Opnd, GPR64Opnd, IIFcvt>;
}
let Predicates = [NoNaNsFPMath, HasStdEnc] in {
- def FABS_S : ABSS_FT<"abs.s", FGR32, FGR32, IIFcvt, fabs>, ABSS_FM<0x5, 16>;
- def FNEG_S : ABSS_FT<"neg.s", FGR32, FGR32, IIFcvt, fneg>, ABSS_FM<0x7, 16>;
+ def FABS_S : ABSS_FT<"abs.s", FGR32Opnd, FGR32Opnd, IIFcvt, fabs>,
+ ABSS_FM<0x5, 16>;
+ def FNEG_S : ABSS_FT<"neg.s", FGR32Opnd, FGR32Opnd, IIFcvt, fneg>,
+ ABSS_FM<0x7, 16>;
defm FABS : ABSS_M<"abs.d", IIFcvt, fabs>, ABSS_FM<0x5, 17>;
defm FNEG : ABSS_M<"neg.d", IIFcvt, fneg>, ABSS_FM<0x7, 17>;
}
-def FSQRT_S : ABSS_FT<"sqrt.s", FGR32, FGR32, IIFsqrtSingle, fsqrt>,
- ABSS_FM<0x4, 16>;
+def FSQRT_S : ABSS_FT<"sqrt.s", FGR32Opnd, FGR32Opnd, IIFsqrtSingle,
+ fsqrt>, ABSS_FM<0x4, 16>;
defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
// The odd-numbered registers are only referenced when doing loads,
@@ -275,130 +332,136 @@ defm FSQRT : ABSS_M<"sqrt.d", IIFsqrtDouble, fsqrt>, ABSS_FM<0x4, 17>;
// regardless of register aliasing.
/// Move Control Registers From/To CPU Registers
-def CFC1 : MFC1_FT_CCR<"cfc1", CPURegs, CCROpnd, IIFmove>, MFC1_FM<2>;
-def CTC1 : MTC1_FT_CCR<"ctc1", CCROpnd, CPURegs, IIFmove>, MFC1_FM<6>;
-def MFC1 : MFC1_FT<"mfc1", CPURegs, FGR32, IIFmove, bitconvert>, MFC1_FM<0>;
-def MTC1 : MTC1_FT<"mtc1", FGR32, CPURegs, IIFmove, bitconvert>, MFC1_FM<4>;
-def DMFC1 : MFC1_FT<"dmfc1", CPU64Regs, FGR64, IIFmove, bitconvert>, MFC1_FM<1>;
-def DMTC1 : MTC1_FT<"dmtc1", FGR64, CPU64Regs, IIFmove, bitconvert>, MFC1_FM<5>;
-
-def FMOV_S : ABSS_FT<"mov.s", FGR32, FGR32, IIFmove>, ABSS_FM<0x6, 16>;
-def FMOV_D32 : ABSS_FT<"mov.d", AFGR64, AFGR64, IIFmove>, ABSS_FM<0x6, 17>,
- Requires<[NotFP64bit, HasStdEnc]>;
-def FMOV_D64 : ABSS_FT<"mov.d", FGR64, FGR64, IIFmove>, ABSS_FM<0x6, 17>,
- Requires<[IsFP64bit, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
+def CFC1 : MFC1_FT<"cfc1", GPR32Opnd, CCROpnd, IIFmove>, MFC1_FM<2>;
+def CTC1 : MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, IIFmove>, MFC1_FM<6>;
+def MFC1 : MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, IIFmoveC1, bitconvert>,
+ MFC1_FM<0>;
+def MTC1 : MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, IIFmoveC1, bitconvert>,
+ MFC1_FM<4>;
+def MFHC1 : MFC1_FT<"mfhc1", GPR32Opnd, FGRH32Opnd, IIFmoveC1>,
+ MFC1_FM<3>;
+def MTHC1 : MTC1_FT<"mthc1", FGRH32Opnd, GPR32Opnd, IIFmoveC1>,
+ MFC1_FM<7>;
+def DMFC1 : MFC1_FT<"dmfc1", GPR64Opnd, FGR64Opnd, IIFmoveC1,
+ bitconvert>, MFC1_FM<1>;
+def DMTC1 : MTC1_FT<"dmtc1", FGR64Opnd, GPR64Opnd, IIFmoveC1,
+ bitconvert>, MFC1_FM<5>;
+
+def FMOV_S : ABSS_FT<"mov.s", FGR32Opnd, FGR32Opnd, IIFmove>,
+ ABSS_FM<0x6, 16>;
+def FMOV_D32 : ABSS_FT<"mov.d", AFGR64Opnd, AFGR64Opnd, IIFmove>,
+ ABSS_FM<0x6, 17>, Requires<[NotFP64bit, HasStdEnc]>;
+def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, IIFmove>,
+ ABSS_FM<0x6, 17>, Requires<[IsFP64bit, HasStdEnc]> {
+ let DecoderNamespace = "Mips64";
}
/// Floating Point Memory Instructions
-let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
- def LWC1_P8 : LW_FT<"lwc1", FGR32, IILoad, mem64, load>, LW_FM<0x31>;
- def SWC1_P8 : SW_FT<"swc1", FGR32, IIStore, mem64, store>, LW_FM<0x39>;
- def LDC164_P8 : LW_FT<"ldc1", FGR64, IILoad, mem64, load>, LW_FM<0x35> {
- let isCodeGenOnly =1;
- }
- def SDC164_P8 : SW_FT<"sdc1", FGR64, IIStore, mem64, store>, LW_FM<0x3d> {
- let isCodeGenOnly =1;
- }
+let Predicates = [HasStdEnc] in {
+ def LWC1 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, load>, LW_FM<0x31>;
+ def SWC1 : SW_FT<"swc1", FGR32Opnd, IIFStore, store>, LW_FM<0x39>;
}
-let Predicates = [NotN64, HasStdEnc] in {
- def LWC1 : LW_FT<"lwc1", FGR32, IILoad, mem, load>, LW_FM<0x31>;
- def SWC1 : SW_FT<"swc1", FGR32, IIStore, mem, store>, LW_FM<0x39>;
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace = "Mips64" in {
+ def LDC164 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
+ def SDC164 : SW_FT<"sdc1", FGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
}
-let Predicates = [NotN64, HasMips64, HasStdEnc],
- DecoderNamespace = "Mips64" in {
- def LDC164 : LW_FT<"ldc1", FGR64, IILoad, mem, load>, LW_FM<0x35>;
- def SDC164 : SW_FT<"sdc1", FGR64, IIStore, mem, store>, LW_FM<0x3d>;
+let Predicates = [NotFP64bit, HasStdEnc] in {
+ def LDC1 : LW_FT<"ldc1", AFGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
+ def SDC1 : SW_FT<"sdc1", AFGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
}
-let Predicates = [NotN64, NotMips64, HasStdEnc] in {
- def LDC1 : LW_FT<"ldc1", AFGR64, IILoad, mem, load>, LW_FM<0x35>;
- def SDC1 : SW_FT<"sdc1", AFGR64, IIStore, mem, store>, LW_FM<0x3d>;
+/// Cop2 Memory Instructions
+let Predicates = [HasStdEnc] in {
+ def LWC2 : LW_FT<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>;
+ def SWC2 : SW_FT<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>;
+ def LDC2 : LW_FT<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>;
+ def SDC2 : SW_FT<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>;
}
// Indexed loads and stores.
let Predicates = [HasFPIdx, HasStdEnc] in {
- def LWXC1 : LWXC1_FT<"lwxc1", FGR32, CPURegs, IILoad, load>, LWXC1_FM<0>;
- def SWXC1 : SWXC1_FT<"swxc1", FGR32, CPURegs, IIStore, store>, SWXC1_FM<8>;
-}
-
-let Predicates = [HasMips32r2, NotMips64, HasStdEnc] in {
- def LDXC1 : LWXC1_FT<"ldxc1", AFGR64, CPURegs, IILoad, load>, LWXC1_FM<1>;
- def SDXC1 : SWXC1_FT<"sdxc1", AFGR64, CPURegs, IIStore, store>, SWXC1_FM<9>;
+ def LWXC1 : LWXC1_FT<"lwxc1", FGR32Opnd, IIFLoad, load>, LWXC1_FM<0>;
+ def SWXC1 : SWXC1_FT<"swxc1", FGR32Opnd, IIFStore, store>, SWXC1_FM<8>;
}
-let Predicates = [HasMips64, NotN64, HasStdEnc], DecoderNamespace="Mips64" in {
- def LDXC164 : LWXC1_FT<"ldxc1", FGR64, CPURegs, IILoad, load>, LWXC1_FM<1>;
- def SDXC164 : SWXC1_FT<"sdxc1", FGR64, CPURegs, IIStore, store>, SWXC1_FM<9>;
+let Predicates = [HasFPIdx, NotFP64bit, HasStdEnc] in {
+ def LDXC1 : LWXC1_FT<"ldxc1", AFGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
+ def SDXC1 : SWXC1_FT<"sdxc1", AFGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
}
-// n64
-let Predicates = [IsN64, HasStdEnc], isCodeGenOnly=1 in {
- def LWXC1_P8 : LWXC1_FT<"lwxc1", FGR32, CPU64Regs, IILoad, load>, LWXC1_FM<0>;
- def LDXC164_P8 : LWXC1_FT<"ldxc1", FGR64, CPU64Regs, IILoad, load>,
- LWXC1_FM<1>;
- def SWXC1_P8 : SWXC1_FT<"swxc1", FGR32, CPU64Regs, IIStore, store>,
- SWXC1_FM<8>;
- def SDXC164_P8 : SWXC1_FT<"sdxc1", FGR64, CPU64Regs, IIStore, store>,
- SWXC1_FM<9>;
+let Predicates = [HasFPIdx, IsFP64bit, HasStdEnc],
+ DecoderNamespace="Mips64" in {
+ def LDXC164 : LWXC1_FT<"ldxc1", FGR64Opnd, IIFLoad, load>, LWXC1_FM<1>;
+ def SDXC164 : SWXC1_FT<"sdxc1", FGR64Opnd, IIFStore, store>, SWXC1_FM<9>;
}
// Load/store doubleword indexed unaligned.
-let Predicates = [NotMips64, HasStdEnc] in {
- def LUXC1 : LWXC1_FT<"luxc1", AFGR64, CPURegs, IILoad>, LWXC1_FM<0x5>;
- def SUXC1 : SWXC1_FT<"suxc1", AFGR64, CPURegs, IIStore>, SWXC1_FM<0xd>;
+let Predicates = [NotFP64bit, HasStdEnc] in {
+ def LUXC1 : LWXC1_FT<"luxc1", AFGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
+ def SUXC1 : SWXC1_FT<"suxc1", AFGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
}
-let Predicates = [HasMips64, HasStdEnc],
- DecoderNamespace="Mips64" in {
- def LUXC164 : LWXC1_FT<"luxc1", FGR64, CPURegs, IILoad>, LWXC1_FM<0x5>;
- def SUXC164 : SWXC1_FT<"suxc1", FGR64, CPURegs, IIStore>, SWXC1_FM<0xd>;
+let Predicates = [IsFP64bit, HasStdEnc], DecoderNamespace="Mips64" in {
+ def LUXC164 : LWXC1_FT<"luxc1", FGR64Opnd, IIFLoad>, LWXC1_FM<0x5>;
+ def SUXC164 : SWXC1_FT<"suxc1", FGR64Opnd, IIFStore>, SWXC1_FM<0xd>;
}
/// Floating-point Arithmetic
-def FADD_S : ADDS_FT<"add.s", FGR32, IIFadd, 1, fadd>, ADDS_FM<0x00, 16>;
-defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>;
-def FDIV_S : ADDS_FT<"div.s", FGR32, IIFdivSingle, 0, fdiv>, ADDS_FM<0x03, 16>;
-defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>;
-def FMUL_S : ADDS_FT<"mul.s", FGR32, IIFmulSingle, 1, fmul>, ADDS_FM<0x02, 16>;
-defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>;
-def FSUB_S : ADDS_FT<"sub.s", FGR32, IIFadd, 0, fsub>, ADDS_FM<0x01, 16>;
-defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>;
+def FADD_S : ADDS_FT<"add.s", FGR32Opnd, IIFadd, 1, fadd>,
+ ADDS_FM<0x00, 16>;
+defm FADD : ADDS_M<"add.d", IIFadd, 1, fadd>, ADDS_FM<0x00, 17>;
+def FDIV_S : ADDS_FT<"div.s", FGR32Opnd, IIFdivSingle, 0, fdiv>,
+ ADDS_FM<0x03, 16>;
+defm FDIV : ADDS_M<"div.d", IIFdivDouble, 0, fdiv>, ADDS_FM<0x03, 17>;
+def FMUL_S : ADDS_FT<"mul.s", FGR32Opnd, IIFmulSingle, 1, fmul>,
+ ADDS_FM<0x02, 16>;
+defm FMUL : ADDS_M<"mul.d", IIFmulDouble, 1, fmul>, ADDS_FM<0x02, 17>;
+def FSUB_S : ADDS_FT<"sub.s", FGR32Opnd, IIFadd, 0, fsub>,
+ ADDS_FM<0x01, 16>;
+defm FSUB : ADDS_M<"sub.d", IIFadd, 0, fsub>, ADDS_FM<0x01, 17>;
let Predicates = [HasMips32r2, HasStdEnc] in {
- def MADD_S : MADDS_FT<"madd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<4, 0>;
- def MSUB_S : MADDS_FT<"msub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<5, 0>;
+ def MADD_S : MADDS_FT<"madd.s", FGR32Opnd, IIFmulSingle, fadd>,
+ MADDS_FM<4, 0>;
+ def MSUB_S : MADDS_FT<"msub.s", FGR32Opnd, IIFmulSingle, fsub>,
+ MADDS_FM<5, 0>;
}
let Predicates = [HasMips32r2, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_S : NMADDS_FT<"nmadd.s", FGR32, IIFmulSingle, fadd>, MADDS_FM<6, 0>;
- def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32, IIFmulSingle, fsub>, MADDS_FM<7, 0>;
+ def NMADD_S : NMADDS_FT<"nmadd.s", FGR32Opnd, IIFmulSingle, fadd>,
+ MADDS_FM<6, 0>;
+ def NMSUB_S : NMADDS_FT<"nmsub.s", FGR32Opnd, IIFmulSingle, fsub>,
+ MADDS_FM<7, 0>;
}
let Predicates = [HasMips32r2, NotFP64bit, HasStdEnc] in {
- def MADD_D32 : MADDS_FT<"madd.d", AFGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>;
- def MSUB_D32 : MADDS_FT<"msub.d", AFGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>;
+ def MADD_D32 : MADDS_FT<"madd.d", AFGR64Opnd, IIFmulDouble, fadd>,
+ MADDS_FM<4, 1>;
+ def MSUB_D32 : MADDS_FT<"msub.d", AFGR64Opnd, IIFmulDouble, fsub>,
+ MADDS_FM<5, 1>;
}
let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath, HasStdEnc] in {
- def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64, IIFmulDouble, fadd>,
+ def NMADD_D32 : NMADDS_FT<"nmadd.d", AFGR64Opnd, IIFmulDouble, fadd>,
MADDS_FM<6, 1>;
- def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64, IIFmulDouble, fsub>,
+ def NMSUB_D32 : NMADDS_FT<"nmsub.d", AFGR64Opnd, IIFmulDouble, fsub>,
MADDS_FM<7, 1>;
}
let Predicates = [HasMips32r2, IsFP64bit, HasStdEnc], isCodeGenOnly=1 in {
- def MADD_D64 : MADDS_FT<"madd.d", FGR64, IIFmulDouble, fadd>, MADDS_FM<4, 1>;
- def MSUB_D64 : MADDS_FT<"msub.d", FGR64, IIFmulDouble, fsub>, MADDS_FM<5, 1>;
+ def MADD_D64 : MADDS_FT<"madd.d", FGR64Opnd, IIFmulDouble, fadd>,
+ MADDS_FM<4, 1>;
+ def MSUB_D64 : MADDS_FT<"msub.d", FGR64Opnd, IIFmulDouble, fsub>,
+ MADDS_FM<5, 1>;
}
let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
isCodeGenOnly=1 in {
- def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64, IIFmulDouble, fadd>,
+ def NMADD_D64 : NMADDS_FT<"nmadd.d", FGR64Opnd, IIFmulDouble, fadd>,
MADDS_FM<6, 1>;
- def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64, IIFmulDouble, fsub>,
+ def NMSUB_D64 : NMADDS_FT<"nmsub.d", FGR64Opnd, IIFmulDouble, fsub>,
MADDS_FM<7, 1>;
}
@@ -410,10 +473,9 @@ let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath, HasStdEnc],
def MIPS_BRANCH_F : PatLeaf<(i32 0)>;
def MIPS_BRANCH_T : PatLeaf<(i32 1)>;
-let DecoderMethod = "DecodeBC1" in {
def BC1F : BC1F_FT<"bc1f", IIBranch, MIPS_BRANCH_F>, BC1F_FM<0, 0>;
def BC1T : BC1F_FT<"bc1t", IIBranch, MIPS_BRANCH_T>, BC1F_FM<0, 1>;
-}
+
//===----------------------------------------------------------------------===//
// Floating Point Flag Conditions
//===----------------------------------------------------------------------===//
@@ -447,22 +509,36 @@ def FCMP_D64 : CEQS_FT<"d", FGR64, IIFcmp, MipsFPCmp>, CEQS_FM<17>,
//===----------------------------------------------------------------------===//
// Floating Point Pseudo-Instructions
//===----------------------------------------------------------------------===//
-def MOVCCRToCCR : PseudoSE<(outs CCR:$dst), (ins CCROpnd:$src), []>;
// This pseudo instr gets expanded into 2 mtc1 instrs after register
// allocation.
-def BuildPairF64 :
- PseudoSE<(outs AFGR64:$dst),
- (ins CPURegs:$lo, CPURegs:$hi),
- [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>;
+class BuildPairF64Base<RegisterOperand RO> :
+ PseudoSE<(outs RO:$dst), (ins GPR32Opnd:$lo, GPR32Opnd:$hi),
+ [(set RO:$dst, (MipsBuildPairF64 GPR32Opnd:$lo, GPR32Opnd:$hi))]>;
+
+def BuildPairF64 : BuildPairF64Base<AFGR64Opnd>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+def BuildPairF64_64 : BuildPairF64Base<FGR64Opnd>,
+ Requires<[IsFP64bit, HasStdEnc]>;
// This pseudo instr gets expanded into 2 mfc1 instrs after register
// allocation.
// if n is 0, lower part of src is extracted.
// if n is 1, higher part of src is extracted.
-def ExtractElementF64 :
- PseudoSE<(outs CPURegs:$dst), (ins AFGR64:$src, i32imm:$n),
- [(set CPURegs:$dst, (MipsExtractElementF64 AFGR64:$src, imm:$n))]>;
+class ExtractElementF64Base<RegisterOperand RO> :
+ PseudoSE<(outs GPR32Opnd:$dst), (ins RO:$src, i32imm:$n),
+ [(set GPR32Opnd:$dst, (MipsExtractElementF64 RO:$src, imm:$n))]>;
+
+def ExtractElementF64 : ExtractElementF64Base<AFGR64Opnd>,
+ Requires<[NotFP64bit, HasStdEnc]>;
+def ExtractElementF64_64 : ExtractElementF64Base<FGR64Opnd>,
+ Requires<[IsFP64bit, HasStdEnc]>;
+
+//===----------------------------------------------------------------------===//
+// InstAliases.
+//===----------------------------------------------------------------------===//
+def : InstAlias<"bc1t $offset", (BC1T FCC0, brtarget:$offset)>;
+def : InstAlias<"bc1f $offset", (BC1F FCC0, brtarget:$offset)>;
//===----------------------------------------------------------------------===//
// Floating Point Patterns
@@ -470,59 +546,59 @@ def ExtractElementF64 :
def : MipsPat<(f32 fpimm0), (MTC1 ZERO)>;
def : MipsPat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
-def : MipsPat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
-def : MipsPat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
+def : MipsPat<(f32 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_S_W GPR32Opnd:$src)>;
+def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
+ (TRUNC_W_S FGR32Opnd:$src)>;
let Predicates = [NotFP64bit, HasStdEnc] in {
- def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
- (CVT_D32_W (MTC1 CPURegs:$src))>;
- def : MipsPat<(i32 (fp_to_sint AFGR64:$src)),
- (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
- def : MipsPat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
- def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
+ def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D32_W GPR32Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP AFGR64Opnd:$src),
+ (TRUNC_W_D32 AFGR64Opnd:$src)>;
+ def : MipsPat<(f32 (fround AFGR64Opnd:$src)),
+ (CVT_S_D32 AFGR64Opnd:$src)>;
+ def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D32_S FGR32Opnd:$src)>;
}
let Predicates = [IsFP64bit, HasStdEnc] in {
def : MipsPat<(f64 fpimm0), (DMTC1 ZERO_64)>;
def : MipsPat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
- def : MipsPat<(f64 (sint_to_fp CPURegs:$src)),
- (CVT_D64_W (MTC1 CPURegs:$src))>;
- def : MipsPat<(f32 (sint_to_fp CPU64Regs:$src)),
- (CVT_S_L (DMTC1 CPU64Regs:$src))>;
- def : MipsPat<(f64 (sint_to_fp CPU64Regs:$src)),
- (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
+ def : MipsPat<(f64 (sint_to_fp GPR32Opnd:$src)),
+ (PseudoCVT_D64_W GPR32Opnd:$src)>;
+ def : MipsPat<(f32 (sint_to_fp GPR64Opnd:$src)),
+ (EXTRACT_SUBREG (PseudoCVT_S_L GPR64Opnd:$src), sub_lo)>;
+ def : MipsPat<(f64 (sint_to_fp GPR64Opnd:$src)),
+ (PseudoCVT_D64_L GPR64Opnd:$src)>;
- def : MipsPat<(i32 (fp_to_sint FGR64:$src)),
- (MFC1 (TRUNC_W_D64 FGR64:$src))>;
- def : MipsPat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
- def : MipsPat<(i64 (fp_to_sint FGR64:$src)),
- (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
+ def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_W_D64 FGR64Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP FGR32Opnd:$src),
+ (TRUNC_L_S FGR32Opnd:$src)>;
+ def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src),
+ (TRUNC_L_D64 FGR64Opnd:$src)>;
- def : MipsPat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
- def : MipsPat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
+ def : MipsPat<(f32 (fround FGR64Opnd:$src)),
+ (CVT_S_D64 FGR64Opnd:$src)>;
+ def : MipsPat<(f64 (fextend FGR32Opnd:$src)),
+ (CVT_D64_S FGR32Opnd:$src)>;
}
// Patterns for loads/stores with a reg+imm operand.
let AddedComplexity = 40 in {
- let Predicates = [IsN64, HasStdEnc] in {
- def : LoadRegImmPat<LWC1_P8, f32, load>;
- def : StoreRegImmPat<SWC1_P8, f32>;
- def : LoadRegImmPat<LDC164_P8, f64, load>;
- def : StoreRegImmPat<SDC164_P8, f64>;
- }
-
- let Predicates = [NotN64, HasStdEnc] in {
+ let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LWC1, f32, load>;
def : StoreRegImmPat<SWC1, f32>;
}
- let Predicates = [NotN64, HasMips64, HasStdEnc] in {
+ let Predicates = [IsFP64bit, HasStdEnc] in {
def : LoadRegImmPat<LDC164, f64, load>;
def : StoreRegImmPat<SDC164, f64>;
}
- let Predicates = [NotN64, NotMips64, HasStdEnc] in {
+ let Predicates = [NotFP64bit, HasStdEnc] in {
def : LoadRegImmPat<LDC1, f64, load>;
def : StoreRegImmPat<SDC1, f64>;
}
diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td
index ea0737222125..737a018c67af 100644
--- a/lib/Target/Mips/MipsInstrFormats.td
+++ b/lib/Target/Mips/MipsInstrFormats.td
@@ -183,7 +183,7 @@ class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
// Format J instruction class in Mips : <|opcode|address|>
//===----------------------------------------------------------------------===//
-class FJ<bits<6> op>
+class FJ<bits<6> op> : StdArch
{
bits<26> target;
@@ -272,7 +272,7 @@ class SRLV_FM<bits<6> funct, bit rotate> : StdArch {
let Inst{5-0} = funct;
}
-class BEQ_FM<bits<6> op> {
+class BEQ_FM<bits<6> op> : StdArch {
bits<5> rs;
bits<5> rt;
bits<16> offset;
@@ -285,7 +285,7 @@ class BEQ_FM<bits<6> op> {
let Inst{15-0} = offset;
}
-class BGEZ_FM<bits<6> op, bits<5> funct> {
+class BGEZ_FM<bits<6> op, bits<5> funct> : StdArch {
bits<5> rs;
bits<16> offset;
@@ -297,17 +297,6 @@ class BGEZ_FM<bits<6> op, bits<5> funct> {
let Inst{15-0} = offset;
}
-class B_FM {
- bits<16> offset;
-
- bits<32> Inst;
-
- let Inst{31-26} = 4;
- let Inst{25-21} = 0;
- let Inst{20-16} = 0;
- let Inst{15-0} = offset;
-}
-
class SLTI_FM<bits<6> op> : StdArch {
bits<5> rt;
bits<5> rs;
@@ -321,7 +310,7 @@ class SLTI_FM<bits<6> op> : StdArch {
let Inst{15-0} = imm16;
}
-class MFLO_FM<bits<6> funct> {
+class MFLO_FM<bits<6> funct> : StdArch {
bits<5> rd;
bits<32> Inst;
@@ -333,7 +322,7 @@ class MFLO_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class MTLO_FM<bits<6> funct> {
+class MTLO_FM<bits<6> funct> : StdArch {
bits<5> rs;
bits<32> Inst;
@@ -344,7 +333,7 @@ class MTLO_FM<bits<6> funct> {
let Inst{5-0} = funct;
}
-class SEB_FM<bits<5> funct, bits<6> funct2> {
+class SEB_FM<bits<5> funct, bits<6> funct2> : StdArch {
bits<5> rd;
bits<5> rt;
@@ -358,7 +347,7 @@ class SEB_FM<bits<5> funct, bits<6> funct2> {
let Inst{5-0} = funct2;
}
-class CLO_FM<bits<6> funct> {
+class CLO_FM<bits<6> funct> : StdArch {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@@ -374,7 +363,7 @@ class CLO_FM<bits<6> funct> {
let rt = rd;
}
-class LUI_FM {
+class LUI_FM : StdArch {
bits<5> rt;
bits<16> imm16;
@@ -386,7 +375,7 @@ class LUI_FM {
let Inst{15-0} = imm16;
}
-class JALR_FM {
+class JALR_FM : StdArch {
bits<5> rd;
bits<5> rs;
@@ -400,18 +389,7 @@ class JALR_FM {
let Inst{5-0} = 9;
}
-class BAL_FM {
- bits<16> offset;
-
- bits<32> Inst;
-
- let Inst{31-26} = 1;
- let Inst{25-21} = 0;
- let Inst{20-16} = 0x11;
- let Inst{15-0} = offset;
-}
-
-class BGEZAL_FM<bits<5> funct> {
+class BGEZAL_FM<bits<5> funct> : StdArch {
bits<5> rs;
bits<16> offset;
@@ -446,7 +424,7 @@ class MULT_FM<bits<6> op, bits<6> funct> : StdArch {
let Inst{5-0} = funct;
}
-class EXT_FM<bits<6> funct> {
+class EXT_FM<bits<6> funct> : StdArch {
bits<5> rt;
bits<5> rs;
bits<5> pos;
@@ -476,6 +454,90 @@ class RDHWR_FM {
let Inst{5-0} = 0x3b;
}
+class TEQ_FM<bits<6> funct> : StdArch {
+ bits<5> rs;
+ bits<5> rt;
+ bits<10> code_;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 0;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-6} = code_;
+ let Inst{5-0} = funct;
+}
+
+class TEQI_FM<bits<5> funct> : StdArch {
+ bits<5> rs;
+ bits<16> imm16;
+
+ bits<32> Inst;
+
+ let Inst{31-26} = 1;
+ let Inst{25-21} = rs;
+ let Inst{20-16} = funct;
+ let Inst{15-0} = imm16;
+}
+//===----------------------------------------------------------------------===//
+// System calls format <op|code_|funct>
+//===----------------------------------------------------------------------===//
+
+class SYS_FM<bits<6> funct>
+{
+ bits<20> code_;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-6} = code_;
+ let Inst{5-0} = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Break instruction format <op|code_1|funct>
+//===----------------------------------------------------------------------===//
+
+class BRK_FM<bits<6> funct>
+{
+ bits<10> code_1;
+ bits<10> code_2;
+ bits<32> Inst;
+ let Inst{31-26} = 0x0;
+ let Inst{25-16} = code_1;
+ let Inst{15-6} = code_2;
+ let Inst{5-0} = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Exception return format <Cop0|1|0|funct>
+//===----------------------------------------------------------------------===//
+
+class ER_FM<bits<6> funct>
+{
+ bits<32> Inst;
+ let Inst{31-26} = 0x10;
+ let Inst{25} = 1;
+ let Inst{24-6} = 0;
+ let Inst{5-0} = funct;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Enable/disable interrupt instruction format <Cop0|MFMC0|rt|12|0|sc|0|0>
+//===----------------------------------------------------------------------===//
+
+class EI_FM<bits<1> sc>
+{
+ bits<32> Inst;
+ bits<5> rt;
+ let Inst{31-26} = 0x10;
+ let Inst{25-21} = 0xb;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = 0xc;
+ let Inst{10-6} = 0;
+ let Inst{5} = sc;
+ let Inst{4-0} = 0;
+}
+
//===----------------------------------------------------------------------===//
//
// FLOATING POINT INSTRUCTION FORMATS
@@ -609,13 +671,14 @@ class SWXC1_FM<bits<6> funct> {
}
class BC1F_FM<bit nd, bit tf> {
+ bits<3> fcc;
bits<16> offset;
bits<32> Inst;
let Inst{31-26} = 0x11;
let Inst{25-21} = 0x8;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = nd;
let Inst{16} = tf;
let Inst{15-0} = offset;
@@ -637,6 +700,10 @@ class CEQS_FM<bits<5> fmt> {
let Inst{3-0} = cond;
}
+class C_COND_FM<bits<5> fmt, bits<4> c> : CEQS_FM<fmt> {
+ let cond = c;
+}
+
class CMov_I_F_FM<bits<6> funct, bits<5> fmt> {
bits<5> fd;
bits<5> fs;
@@ -652,15 +719,16 @@ class CMov_I_F_FM<bits<6> funct, bits<5> fmt> {
let Inst{5-0} = funct;
}
-class CMov_F_I_FM<bit tf> {
+class CMov_F_I_FM<bit tf> : StdArch {
bits<5> rd;
bits<5> rs;
+ bits<3> fcc;
bits<32> Inst;
let Inst{31-26} = 0;
let Inst{25-21} = rs;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = 0;
let Inst{16} = tf;
let Inst{15-11} = rd;
@@ -671,12 +739,13 @@ class CMov_F_I_FM<bit tf> {
class CMov_F_F_FM<bits<5> fmt, bit tf> {
bits<5> fd;
bits<5> fs;
+ bits<3> fcc;
bits<32> Inst;
let Inst{31-26} = 0x11;
let Inst{25-21} = fmt;
- let Inst{20-18} = 0; // cc
+ let Inst{20-18} = fcc;
let Inst{17} = 0;
let Inst{16} = tf;
let Inst{15-11} = fs;
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index ad92d41209e9..0ebad0584757 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -22,11 +22,14 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
-#define GET_INSTRINFO_CTOR
+#define GET_INSTRINFO_CTOR_DTOR
#include "MipsGenInstrInfo.inc"
using namespace llvm;
+// Pin the vtable to this file.
+void MipsInstrInfo::anchor() {}
+
MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm, unsigned UncondBr)
: MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
TM(tm), UncondBrOpc(UncondBr) {}
@@ -61,15 +64,6 @@ MachineMemOperand *MipsInstrInfo::GetMemOperand(MachineBasicBlock &MBB, int FI,
MFI.getObjectSize(FI), Align);
}
-MachineInstr*
-MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
- uint64_t Offset, const MDNode *MDPtr,
- DebugLoc DL) const {
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(Mips::DBG_VALUE))
- .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
- return &*MIB;
-}
-
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
@@ -77,7 +71,7 @@ MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
void MipsInstrInfo::AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
MachineBasicBlock *&BB,
SmallVectorImpl<MachineOperand> &Cond) const {
- assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
+ assert(getAnalyzableBrOpc(Opc) && "Not an analyzable branch");
int NumOp = Inst->getNumExplicitOperands();
// for both int and fp branches, the last explicit operand is the
@@ -167,7 +161,7 @@ RemoveBranch(MachineBasicBlock &MBB) const
// Up to 2 branches are removed.
// Note that indirect branches are not removed.
for(removed = 0; I != REnd && removed < 2; ++I, ++removed)
- if (!GetAnalyzableBrOpc(I->getOpcode()))
+ if (!getAnalyzableBrOpc(I->getOpcode()))
break;
MBB.erase(I.base(), FirstBr.base());
@@ -182,7 +176,7 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
{
assert( (Cond.size() && Cond.size() <= 3) &&
"Invalid Mips branch condition!");
- Cond[0].setImm(GetOppositeBranchOpc(Cond[0].getImm()));
+ Cond[0].setImm(getOppositeBranchOpc(Cond[0].getImm()));
return false;
}
@@ -210,7 +204,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
BranchInstrs.push_back(LastInst);
// Not an analyzable branch (e.g., indirect jump).
- if (!GetAnalyzableBrOpc(LastOpc))
+ if (!getAnalyzableBrOpc(LastOpc))
return LastInst->isIndirectBranch() ? BT_Indirect : BT_None;
// Get the second to last instruction in the block.
@@ -219,7 +213,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
if (++I != REnd) {
SecondLastInst = &*I;
- SecondLastOpc = GetAnalyzableBrOpc(SecondLastInst->getOpcode());
+ SecondLastOpc = getAnalyzableBrOpc(SecondLastInst->getOpcode());
// Not an analyzable branch (must be an indirect jump).
if (isUnpredicatedTerminator(SecondLastInst) && !SecondLastOpc)
@@ -228,7 +222,7 @@ AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
// If there is only one terminator instruction, process it.
if (!SecondLastOpc) {
- // Unconditional branch
+ // Unconditional branch.
if (LastOpc == UncondBrOpc) {
TBB = LastInst->getOperand(0).getMBB();
return BT_Uncond;
@@ -280,5 +274,22 @@ unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const char *AsmStr = MI->getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
+ case Mips::CONSTPOOL_ENTRY:
+ // If this machine instr is a constant pool entry, its size is recorded as
+ // operand #2.
+ return MI->getOperand(2).getImm();
}
}
+
+MachineInstrBuilder
+MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
+ MachineBasicBlock::iterator I) const {
+ MachineInstrBuilder MIB;
+ MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
+
+ for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J)
+ MIB.addOperand(I->getOperand(J));
+
+ MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
+ return MIB;
+}
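
genInstrWithNewOpc clones an instruction's explicit operands and memory operands under a different opcode, inserting the clone immediately before the original. A hedged usage sketch, assuming it runs inside a Mips target pass with the usual MipsInstrInfo.h includes; the helper name and the choice of Mips::BEQ are illustrative, not taken from the patch:

static void replaceOpcodeSketch(const MipsInstrInfo &TII,
                                MachineBasicBlock::iterator I) {
  // Build the replacement right before I, reusing I's operands and memory
  // operands, then delete the original instruction.
  MachineInstrBuilder NewMI = TII.genInstrWithNewOpc(Mips::BEQ, I);
  (void)NewMI; // extra operands could be appended here if the new opcode needs them
  I->eraseFromParent();
}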
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 8c05d97beac2..d9ac961cd33c 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -17,6 +17,7 @@
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -26,6 +27,7 @@
namespace llvm {
class MipsInstrInfo : public MipsGenInstrInfo {
+ virtual void anchor();
protected:
MipsTargetMachine &TM;
unsigned UncondBrOpc;
@@ -66,11 +68,6 @@ public:
bool AllowModify,
SmallVectorImpl<MachineInstr*> &BranchInstrs) const;
- virtual MachineInstr* emitFrameIndexDebugValue(MachineFunction &MF,
- int FrameIx, uint64_t Offset,
- const MDNode *MDPtr,
- DebugLoc DL) const;
-
/// Insert nop instruction when hazard condition is found
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
@@ -81,7 +78,7 @@ public:
///
virtual const MipsRegisterInfo &getRegisterInfo() const = 0;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const = 0;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const = 0;
/// Return the number of bytes of code the specified instruction may be.
unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
@@ -116,6 +113,11 @@ public:
const TargetRegisterInfo *TRI,
int64_t Offset) const = 0;
+ /// Create an instruction which has the same operands and memory operands
+ /// as MI but has a new opcode.
+ MachineInstrBuilder genInstrWithNewOpc(unsigned NewOpc,
+ MachineBasicBlock::iterator I) const;
+
protected:
bool isZeroImm(const MachineOperand &op) const;
@@ -123,7 +125,7 @@ protected:
unsigned Flag) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const = 0;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const = 0;
void AnalyzeCondBr(const MachineInstr *Inst, unsigned Opc,
MachineBasicBlock *&BB,
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 86ec72982b33..ebdbaa416fcc 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -23,10 +23,9 @@ def SDT_MipsCMov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
SDTCisInt<4>]>;
def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-def SDT_ExtractLOHI : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVT<1, untyped>,
- SDTCisVT<2, i32>]>;
-def SDT_InsertLOHI : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>,
- SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>;
+def SDT_MFLOHI : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVT<1, untyped>]>;
+def SDT_MTLOHI : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>,
+ SDTCisInt<1>, SDTCisSameAs<1, 2>]>;
def SDT_MipsMultDiv : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, SDTCisInt<1>,
SDTCisSameAs<1, 2>]>;
def SDT_MipsMAddMSub : SDTypeProfile<1, 3,
@@ -85,11 +84,12 @@ def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
[SDNPHasChain, SDNPSideEffect,
SDNPOptInGlue, SDNPOutGlue]>;
-// Node used to extract integer from LO/HI register.
-def ExtractLOHI : SDNode<"MipsISD::ExtractLOHI", SDT_ExtractLOHI>;
+// Nodes used to extract LO/HI registers.
+def MipsMFHI : SDNode<"MipsISD::MFHI", SDT_MFLOHI>;
+def MipsMFLO : SDNode<"MipsISD::MFLO", SDT_MFLOHI>;
// Node used to insert 32-bit integers to LOHI register pair.
-def InsertLOHI : SDNode<"MipsISD::InsertLOHI", SDT_InsertLOHI>;
+def MipsMTLOHI : SDNode<"MipsISD::MTLOHI", SDT_MTLOHI>;
// Mult nodes.
def MipsMult : SDNode<"MipsISD::Mult", SDT_MipsMultDiv>;
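
The ExtractLOHI/InsertLOHI nodes are replaced by MipsMFHI/MipsMFLO, which each take one untyped LO/HI operand, and MipsMTLOHI, which builds the pair from two integers. A rough sketch of how lowering code could chain these nodes; the MipsISD node names match the SDNode definitions above, while the helper name and everything else is illustrative only:

#include "MipsISelLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Multiply two integers and read back the low half of the result:
// Mult produces an untyped LO/HI pair, MFLO extracts the integer LO part.
static SDValue lowerMultToLO(SDValue LHS, SDValue RHS, EVT Ty,
                             SelectionDAG &DAG, SDLoc DL) {
  SDValue Mult = DAG.getNode(MipsISD::Mult, DL, MVT::Untyped, LHS, RHS);
  return DAG.getNode(MipsISD::MFLO, DL, Ty, Mult);
}
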
@@ -104,7 +104,8 @@ def MipsMSubu : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub>;
// DivRem(u) nodes
def MipsDivRem : SDNode<"MipsISD::DivRem", SDT_MipsMultDiv>;
def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsMultDiv>;
-def MipsDivRem16 : SDNode<"MipsISD::DivRem16", SDT_MipsDivRem16, [SDNPOutGlue]>;
+def MipsDivRem16 : SDNode<"MipsISD::DivRem16", SDT_MipsDivRem16,
+ [SDNPOutGlue]>;
def MipsDivRemU16 : SDNode<"MipsISD::DivRemU16", SDT_MipsDivRem16,
[SDNPOutGlue]>;
@@ -113,7 +114,7 @@ def MipsDivRemU16 : SDNode<"MipsISD::DivRemU16", SDT_MipsDivRem16,
// Wrapper node patterns give the instruction selector a chance to replace
// target constant nodes that would otherwise remain unchanged with ADDiu
// nodes. Without these wrapper node patterns, the following conditional move
-// instrucion is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
+// instruction is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
// compiled:
// movn %got(d)($gp), %got(c)($gp), $4
// This instruction is illegal since movn can take only register operands.
@@ -180,6 +181,12 @@ def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">,
def HasStdEnc : Predicate<"Subtarget.hasStandardEncoding()">,
AssemblerPredicate<"!FeatureMips16">;
def NotDSP : Predicate<"!Subtarget.hasDSP()">;
+def InMicroMips : Predicate<"Subtarget.inMicroMipsMode()">,
+ AssemblerPredicate<"FeatureMicroMips">;
+def NotInMicroMips : Predicate<"!Subtarget.inMicroMipsMode()">,
+ AssemblerPredicate<"!FeatureMicroMips">;
+def IsLE : Predicate<"Subtarget.isLittle()">;
+def IsBE : Predicate<"!Subtarget.isLittle()">;
class MipsPat<dag pattern, dag result> : Pat<pattern, result> {
let Predicates = [HasStdEnc];
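
The predicates added above are thin wrappers over MipsSubtarget queries; the quoted strings are evaluated verbatim during instruction selection and assembler matching. Written out as equivalent C++ checks purely for illustration (the helper function names are hypothetical):

#include "MipsSubtarget.h"

using namespace llvm;

// InMicroMips: "Subtarget.inMicroMipsMode()"
static bool useMicroMipsEncoding(const MipsSubtarget &Subtarget) {
  return Subtarget.inMicroMipsMode();
}

// IsBE: "!Subtarget.isLittle()"
static bool isBigEndianTarget(const MipsSubtarget &Subtarget) {
  return !Subtarget.isLittle();
}
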
@@ -240,7 +247,7 @@ def brtarget : Operand<OtherVT> {
def calltarget : Operand<iPTR> {
let EncoderMethod = "getJumpTargetOpValue";
}
-def calltarget64: Operand<i64>;
+
def simm16 : Operand<i32> {
let DecoderMethod= "DecodeSimm16";
}
@@ -248,48 +255,73 @@ def simm16 : Operand<i32> {
def simm20 : Operand<i32> {
}
-def simm16_64 : Operand<i64>;
-def shamt : Operand<i32>;
+def uimm20 : Operand<i32> {
+}
+
+def uimm10 : Operand<i32> {
+}
+
+def simm16_64 : Operand<i64> {
+ let DecoderMethod = "DecodeSimm16";
+}
// Unsigned Operand
+def uimm5 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
+def uimm6 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
def uimm16 : Operand<i32> {
let PrintMethod = "printUnsignedImm";
}
+def pcrel16 : Operand<i32> {
+}
+
def MipsMemAsmOperand : AsmOperandClass {
let Name = "Mem";
let ParserMethod = "parseMemOperand";
}
-// Address operand
-def mem : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPURegs, simm16);
- let EncoderMethod = "getMemEncoding";
- let ParserMatchClass = MipsMemAsmOperand;
- let OperandType = "OPERAND_MEMORY";
+def MipsInvertedImmoperand : AsmOperandClass {
+ let Name = "InvNum";
+ let RenderMethod = "addImmOperands";
+ let ParserMethod = "parseInvNum";
+}
+
+def PtrRegAsmOperand : AsmOperandClass {
+ let Name = "PtrReg";
+ let ParserMethod = "parsePtrReg";
}
-def mem64 : Operand<i64> {
+
+def InvertedImOperand : Operand<i32> {
+ let ParserMatchClass = MipsInvertedImmoperand;
+}
+
+// Address operand
+def mem : Operand<iPTR> {
let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops CPU64Regs, simm16_64);
+ let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemAsmOperand;
let OperandType = "OPERAND_MEMORY";
}
-def mem_ea : Operand<i32> {
+def mem_ea : Operand<iPTR> {
let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPURegs, simm16);
+ let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let OperandType = "OPERAND_MEMORY";
}
-def mem_ea_64 : Operand<i64> {
- let PrintMethod = "printMemOperandEA";
- let MIOperandInfo = (ops CPU64Regs, simm16_64);
- let EncoderMethod = "getMemEncoding";
- let OperandType = "OPERAND_MEMORY";
+def PtrRC : Operand<iPTR> {
+ let MIOperandInfo = (ops ptr_rc);
+ let DecoderMethod = "DecodePtrRegisterClass";
+ let ParserMatchClass = PtrRegAsmOperand;
}
// size operand of ext instruction
@@ -362,6 +394,9 @@ def addr :
def addrRegImm :
ComplexPattern<iPTR, 2, "selectAddrRegImm", [frameindex]>;
+def addrRegReg :
+ ComplexPattern<iPTR, 2, "selectAddrRegReg", [frameindex]>;
+
def addrDefault :
ComplexPattern<iPTR, 2, "selectAddrDefault", [frameindex]>;
@@ -382,160 +417,111 @@ class ArithLogicR<string opstr, RegisterOperand RO, bit isComm = 0,
// Arithmetic and logical instructions with 2 register operands.
class ArithLogicI<string opstr, Operand Od, RegisterOperand RO,
+ InstrItinClass Itin = NoItinerary,
SDPatternOperator imm_type = null_frag,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RO:$rt), (ins RO:$rs, Od:$imm16),
!strconcat(opstr, "\t$rt, $rs, $imm16"),
[(set RO:$rt, (OpNode RO:$rs, imm_type:$imm16))],
- IIAlu, FrmI, opstr> {
+ Itin, FrmI, opstr> {
let isReMaterializable = 1;
+ let TwoOperandAliasConstraint = "$rs = $rt";
}
// Arithmetic Multiply ADD/SUB
class MArithR<string opstr, bit isComm = 0> :
- InstSE<(outs), (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt),
- !strconcat(opstr, "\t$rs, $rt"), [], IIImul, FrmR> {
- let Defs = [HI, LO];
- let Uses = [HI, LO];
+ InstSE<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ !strconcat(opstr, "\t$rs, $rt"), [], IIImult, FrmR, opstr> {
+ let Defs = [HI0, LO0];
+ let Uses = [HI0, LO0];
let isCommutable = isComm;
}
// Logical
-class LogicNOR<string opstr, RegisterOperand RC>:
- InstSE<(outs RC:$rd), (ins RC:$rs, RC:$rt),
+class LogicNOR<string opstr, RegisterOperand RO>:
+ InstSE<(outs RO:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu, FrmR, opstr> {
+ [(set RO:$rd, (not (or RO:$rs, RO:$rt)))], IIArith, FrmR, opstr> {
let isCommutable = 1;
}
// Shifts
class shift_rotate_imm<string opstr, Operand ImmOpnd,
- RegisterOperand RC, SDPatternOperator OpNode = null_frag,
+ RegisterOperand RO, SDPatternOperator OpNode = null_frag,
SDPatternOperator PF = null_frag> :
- InstSE<(outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
+ InstSE<(outs RO:$rd), (ins RO:$rt, ImmOpnd:$shamt),
!strconcat(opstr, "\t$rd, $rt, $shamt"),
- [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, PF:$shamt))], IIArith, FrmR, opstr>;
-class shift_rotate_reg<string opstr, RegisterOperand RC,
+class shift_rotate_reg<string opstr, RegisterOperand RO,
SDPatternOperator OpNode = null_frag>:
- InstSE<(outs RC:$rd), (ins CPURegsOpnd:$rs, RC:$rt),
+ InstSE<(outs RO:$rd), (ins RO:$rt, GPR32Opnd:$rs),
!strconcat(opstr, "\t$rd, $rt, $rs"),
- [(set RC:$rd, (OpNode RC:$rt, CPURegsOpnd:$rs))], IIAlu, FrmR, opstr>;
+ [(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], IIArith, FrmR, opstr>;
// Load Upper Imediate
-class LoadUpper<string opstr, RegisterClass RC, Operand Imm>:
- InstSE<(outs RC:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
- [], IIAlu, FrmI>, IsAsCheapAsAMove {
+class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
+ InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
+ [], IIArith, FrmI, opstr>, IsAsCheapAsAMove {
let neverHasSideEffects = 1;
let isReMaterializable = 1;
}
-class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
- InstrItinClass itin>: FFI<op, outs, ins, asmstr, pattern> {
- bits<21> addr;
- let Inst{25-21} = addr{20-16};
- let Inst{15-0} = addr{15-0};
- let DecoderMethod = "DecodeMem";
-}
-
// Memory Load/Store
-class Load<string opstr, SDPatternOperator OpNode, RegisterClass RC,
- Operand MemOpnd, ComplexPattern Addr, string ofsuffix> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI,
- !strconcat(opstr, ofsuffix)> {
+class Load<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
+ InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, (OpNode Addr:$addr))], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let canFoldAsLoad = 1;
let mayLoad = 1;
}
-class Store<string opstr, SDPatternOperator OpNode, RegisterClass RC,
- Operand MemOpnd, ComplexPattern Addr, string ofsuffix> :
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(OpNode RC:$rt, Addr:$addr)], NoItinerary, FrmI,
- !strconcat(opstr, ofsuffix)> {
+class Store<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
+ InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
+ InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, Addr:$addr)], Itin, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
}
-multiclass LoadM<string opstr, RegisterClass RC,
- SDPatternOperator OpNode = null_frag,
- ComplexPattern Addr = addr> {
- def NAME : Load<opstr, OpNode, RC, mem, Addr, "">,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Load<opstr, OpNode, RC, mem64, Addr, "_p8">,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass StoreM<string opstr, RegisterClass RC,
- SDPatternOperator OpNode = null_frag,
- ComplexPattern Addr = addr> {
- def NAME : Store<opstr, OpNode, RC, mem, Addr, "">,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : Store<opstr, OpNode, RC, mem64, Addr, "_p8">,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
// Load/Store Left/Right
let canFoldAsLoad = 1 in
-class LoadLeftRight<string opstr, SDNode OpNode, RegisterClass RC,
- Operand MemOpnd> :
- InstSE<(outs RC:$rt), (ins MemOpnd:$addr, RC:$src),
+class LoadLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
+ InstrItinClass Itin> :
+ InstSE<(outs RO:$rt), (ins mem:$addr, RO:$src),
!strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, (OpNode addr:$addr, RC:$src))], NoItinerary, FrmI> {
+ [(set RO:$rt, (OpNode addr:$addr, RO:$src))], Itin, FrmI> {
let DecoderMethod = "DecodeMem";
string Constraints = "$src = $rt";
}
-class StoreLeftRight<string opstr, SDNode OpNode, RegisterClass RC,
- Operand MemOpnd>:
- InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(OpNode RC:$rt, addr:$addr)], NoItinerary, FrmI> {
+class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
+ InstrItinClass Itin> :
+ InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(OpNode RO:$rt, addr:$addr)], Itin, FrmI> {
let DecoderMethod = "DecodeMem";
}
-multiclass LoadLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> {
- def NAME : LoadLeftRight<opstr, OpNode, RC, mem>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : LoadLeftRight<opstr, OpNode, RC, mem64>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
-multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterClass RC> {
- def NAME : StoreLeftRight<opstr, OpNode, RC, mem>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : StoreLeftRight<opstr, OpNode, RC, mem64>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- let isCodeGenOnly = 1;
- }
-}
-
// Conditional Branch
-class CBranch<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs), (ins RC:$rs, RC:$rt, brtarget:$offset),
+class CBranch<string opstr, DAGOperand opnd, PatFrag cond_op,
+ RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, RO:$rt, opnd:$offset),
!strconcat(opstr, "\t$rs, $rt, $offset"),
- [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$offset)], IIBranch,
- FrmI> {
+ [(brcond (i32 (cond_op RO:$rs, RO:$rt)), bb:$offset)], IIBranch,
+ FrmI, opstr> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
let Defs = [AT];
}
-class CBranchZero<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs), (ins RC:$rs, brtarget:$offset),
+class CBranchZero<string opstr, DAGOperand opnd, PatFrag cond_op,
+ RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, opnd:$offset),
!strconcat(opstr, "\t$rs, $offset"),
- [(brcond (i32 (cond_op RC:$rs, 0)), bb:$offset)], IIBranch, FrmI> {
+ [(brcond (i32 (cond_op RO:$rs, 0)), bb:$offset)], IIBranch,
+ FrmI, opstr> {
let isBranch = 1;
let isTerminator = 1;
let hasDelaySlot = 1;
@@ -543,24 +529,24 @@ class CBranchZero<string opstr, PatFrag cond_op, RegisterClass RC> :
}
// SetCC
-class SetCC_R<string opstr, PatFrag cond_op, RegisterClass RC> :
- InstSE<(outs CPURegsOpnd:$rd), (ins RC:$rs, RC:$rt),
+class SetCC_R<string opstr, PatFrag cond_op, RegisterOperand RO> :
+ InstSE<(outs GPR32Opnd:$rd), (ins RO:$rs, RO:$rt),
!strconcat(opstr, "\t$rd, $rs, $rt"),
- [(set CPURegsOpnd:$rd, (cond_op RC:$rs, RC:$rt))],
- IIAlu, FrmR, opstr>;
+ [(set GPR32Opnd:$rd, (cond_op RO:$rs, RO:$rt))],
+ IIslt, FrmR, opstr>;
class SetCC_I<string opstr, PatFrag cond_op, Operand Od, PatLeaf imm_type,
- RegisterClass RC>:
- InstSE<(outs CPURegsOpnd:$rt), (ins RC:$rs, Od:$imm16),
+ RegisterOperand RO>:
+ InstSE<(outs GPR32Opnd:$rt), (ins RO:$rs, Od:$imm16),
!strconcat(opstr, "\t$rt, $rs, $imm16"),
- [(set CPURegsOpnd:$rt, (cond_op RC:$rs, imm_type:$imm16))],
- IIAlu, FrmI, opstr>;
+ [(set GPR32Opnd:$rt, (cond_op RO:$rs, imm_type:$imm16))],
+ IIslt, FrmI, opstr>;
// Jump
class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
- SDPatternOperator targetoperator> :
+ SDPatternOperator targetoperator, string bopstr> :
InstSE<(outs), (ins opnd:$target), !strconcat(opstr, "\t$target"),
- [(operator targetoperator:$target)], IIBranch, FrmJ> {
+ [(operator targetoperator:$target)], IIBranch, FrmJ, bopstr> {
let isTerminator=1;
let isBarrier=1;
let hasDelaySlot = 1;
@@ -569,9 +555,9 @@ class JumpFJ<DAGOperand opnd, string opstr, SDPatternOperator operator,
}
// Unconditional branch
-class UncondBranch<string opstr> :
- InstSE<(outs), (ins brtarget:$offset), !strconcat(opstr, "\t$offset"),
- [(br bb:$offset)], IIBranch, FrmI> {
+class UncondBranch<Instruction BEQInst> :
+ PseudoSE<(outs), (ins brtarget:$offset), [(br bb:$offset)], IIBranch>,
+ PseudoInstExpansion<(BEQInst ZERO, ZERO, brtarget:$offset)> {
let isBranch = 1;
let isTerminator = 1;
let isBarrier = 1;
@@ -582,17 +568,20 @@ class UncondBranch<string opstr> :
// Base class for indirect branch and return instruction classes.
let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in
-class JumpFR<RegisterClass RC, SDPatternOperator operator = null_frag>:
- InstSE<(outs), (ins RC:$rs), "jr\t$rs", [(operator RC:$rs)], IIBranch, FrmR>;
+class JumpFR<string opstr, RegisterOperand RO,
+ SDPatternOperator operator = null_frag>:
+ InstSE<(outs), (ins RO:$rs), "jr\t$rs", [(operator RO:$rs)], IIBranch,
+ FrmR, opstr>;
// Indirect branch
-class IndirectBranch<RegisterClass RC>: JumpFR<RC, brind> {
+class IndirectBranch<string opstr, RegisterOperand RO> :
+ JumpFR<opstr, RO, brind> {
let isBranch = 1;
let isIndirectBranch = 1;
}
// Return instruction
-class RetBase<RegisterClass RC>: JumpFR<RC> {
+class RetBase<string opstr, RegisterOperand RO>: JumpFR<opstr, RO> {
let isReturn = 1;
let isCodeGenOnly = 1;
let hasCtrlDep = 1;
@@ -601,29 +590,30 @@ class RetBase<RegisterClass RC>: JumpFR<RC> {
// Jump and Link (Call)
let isCall=1, hasDelaySlot=1, Defs = [RA] in {
- class JumpLink<string opstr> :
- InstSE<(outs), (ins calltarget:$target), !strconcat(opstr, "\t$target"),
- [(MipsJmpLink imm:$target)], IIBranch, FrmJ> {
+ class JumpLink<string opstr, DAGOperand opnd> :
+ InstSE<(outs), (ins opnd:$target), !strconcat(opstr, "\t$target"),
+ [(MipsJmpLink imm:$target)], IIBranch, FrmJ, opstr> {
let DecoderMethod = "DecodeJumpTarget";
}
- class JumpLinkRegPseudo<RegisterClass RC, Instruction JALRInst,
- Register RetReg>:
- PseudoSE<(outs), (ins RC:$rs), [(MipsJmpLink RC:$rs)], IIBranch>,
- PseudoInstExpansion<(JALRInst RetReg, RC:$rs)>;
+ class JumpLinkRegPseudo<RegisterOperand RO, Instruction JALRInst,
+ Register RetReg, RegisterOperand ResRO = RO>:
+ PseudoSE<(outs), (ins RO:$rs), [(MipsJmpLink RO:$rs)], IIBranch>,
+ PseudoInstExpansion<(JALRInst RetReg, ResRO:$rs)>;
- class JumpLinkReg<string opstr, RegisterClass RC>:
- InstSE<(outs RC:$rd), (ins RC:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [], IIBranch, FrmR>;
+ class JumpLinkReg<string opstr, RegisterOperand RO>:
+ InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
+ [], IIBranch, FrmR, opstr>;
- class BGEZAL_FT<string opstr, RegisterOperand RO> :
- InstSE<(outs), (ins RO:$rs, brtarget:$offset),
- !strconcat(opstr, "\t$rs, $offset"), [], IIBranch, FrmI>;
+ class BGEZAL_FT<string opstr, DAGOperand opnd, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, opnd:$offset),
+ !strconcat(opstr, "\t$rs, $offset"), [], IIBranch, FrmI, opstr>;
}
-class BAL_FT :
- InstSE<(outs), (ins brtarget:$offset), "bal\t$offset", [], IIBranch, FrmI> {
+class BAL_BR_Pseudo<Instruction RealInst> :
+ PseudoSE<(outs), (ins brtarget:$offset), [], IIBranch>,
+ PseudoInstExpansion<(RealInst ZERO, brtarget:$offset)> {
let isBranch = 1;
let isTerminator = 1;
let isBarrier = 1;
@@ -631,12 +621,49 @@ class BAL_FT :
let Defs = [RA];
}
+// Syscall
+class SYS_FT<string opstr> :
+ InstSE<(outs), (ins uimm20:$code_),
+ !strconcat(opstr, "\t$code_"), [], NoItinerary, FrmI>;
+// Break
+class BRK_FT<string opstr> :
+ InstSE<(outs), (ins uimm10:$code_1, uimm10:$code_2),
+ !strconcat(opstr, "\t$code_1, $code_2"), [], NoItinerary, FrmOther>;
+
+// (D)Eret
+class ER_FT<string opstr> :
+ InstSE<(outs), (ins),
+ opstr, [], NoItinerary, FrmOther>;
+
+// Interrupts
+class DEI_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins),
+ !strconcat(opstr, "\t$rt"), [], NoItinerary, FrmOther>;
+
+// Wait
+class WAIT_FT<string opstr> :
+ InstSE<(outs), (ins), opstr, [], NoItinerary, FrmOther> {
+ let Inst{31-26} = 0x10;
+ let Inst{25} = 1;
+ let Inst{24-6} = 0;
+ let Inst{5-0} = 0x20;
+}
+
// Sync
let hasSideEffects = 1 in
class SYNC_FT :
InstSE<(outs), (ins i32imm:$stype), "sync $stype", [(MipsSync imm:$stype)],
NoItinerary, FrmOther>;
+let hasSideEffects = 1 in
+class TEQ_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, RO:$rt, uimm16:$code_),
+ !strconcat(opstr, "\t$rs, $rt, $code_"), [], NoItinerary,
+ FrmI, opstr>;
+
+class TEQI_FT<string opstr, RegisterOperand RO> :
+ InstSE<(outs), (ins RO:$rs, uimm16:$imm16),
+ !strconcat(opstr, "\t$rs, $imm16"), [], NoItinerary, FrmOther, opstr>;
// Mul, Div
class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
list<Register> DefRegs> :
@@ -651,49 +678,61 @@ class Mult<string opstr, InstrItinClass itin, RegisterOperand RO,
// operands.
class MultDivPseudo<Instruction RealInst, RegisterClass R0, RegisterOperand R1,
SDPatternOperator OpNode, InstrItinClass Itin,
- bit IsComm = 1, bit HasSideEffects = 0> :
+ bit IsComm = 1, bit HasSideEffects = 0,
+ bit UsesCustomInserter = 0> :
PseudoSE<(outs R0:$ac), (ins R1:$rs, R1:$rt),
[(set R0:$ac, (OpNode R1:$rs, R1:$rt))], Itin>,
PseudoInstExpansion<(RealInst R1:$rs, R1:$rt)> {
let isCommutable = IsComm;
let hasSideEffects = HasSideEffects;
+ let usesCustomInserter = UsesCustomInserter;
}
// Pseudo multiply add/sub instruction with explicit accumulator register
// operands.
class MAddSubPseudo<Instruction RealInst, SDPatternOperator OpNode>
- : PseudoSE<(outs ACRegs:$ac),
- (ins CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin),
- [(set ACRegs:$ac,
- (OpNode CPURegsOpnd:$rs, CPURegsOpnd:$rt, ACRegs:$acin))],
- IIImul>,
- PseudoInstExpansion<(RealInst CPURegsOpnd:$rs, CPURegsOpnd:$rt)> {
+ : PseudoSE<(outs ACC64:$ac),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin),
+ [(set ACC64:$ac,
+ (OpNode GPR32Opnd:$rs, GPR32Opnd:$rt, ACC64:$acin))],
+ IIImult>,
+ PseudoInstExpansion<(RealInst GPR32Opnd:$rs, GPR32Opnd:$rt)> {
string Constraints = "$acin = $ac";
}
class Div<string opstr, InstrItinClass itin, RegisterOperand RO,
list<Register> DefRegs> :
InstSE<(outs), (ins RO:$rs, RO:$rt), !strconcat(opstr, "\t$$zero, $rs, $rt"),
- [], itin, FrmR> {
+ [], itin, FrmR, opstr> {
let Defs = DefRegs;
}
// Move from Hi/Lo
-class MoveFromLOHI<string opstr, RegisterClass RC, list<Register> UseRegs>:
- InstSE<(outs RC:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR> {
- let Uses = UseRegs;
+class PseudoMFLOHI<RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode>
+ : PseudoSE<(outs DstRC:$rd), (ins SrcRC:$hilo),
+ [(set DstRC:$rd, (OpNode SrcRC:$hilo))], IIHiLo>;
+
+class MoveFromLOHI<string opstr, RegisterOperand RO, Register UseReg>:
+ InstSE<(outs RO:$rd), (ins), !strconcat(opstr, "\t$rd"), [], IIHiLo, FrmR,
+ opstr> {
+ let Uses = [UseReg];
let neverHasSideEffects = 1;
}
-class MoveToLOHI<string opstr, RegisterClass RC, list<Register> DefRegs>:
- InstSE<(outs), (ins RC:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo, FrmR> {
+class PseudoMTLOHI<RegisterClass DstRC, RegisterClass SrcRC>
+ : PseudoSE<(outs DstRC:$lohi), (ins SrcRC:$lo, SrcRC:$hi),
+ [(set DstRC:$lohi, (MipsMTLOHI SrcRC:$lo, SrcRC:$hi))], IIHiLo>;
+
+class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
+ InstSE<(outs), (ins RO:$rs), !strconcat(opstr, "\t$rs"), [], IIHiLo,
+ FrmR, opstr> {
let Defs = DefRegs;
let neverHasSideEffects = 1;
}
-class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> :
- InstSE<(outs RC:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
- [(set RC:$rt, addr:$addr)], NoItinerary, FrmI> {
+class EffectiveAddress<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins mem_ea:$addr), !strconcat(opstr, "\t$rt, $addr"),
+ [(set RO:$rt, addr:$addr)], NoItinerary, FrmI> {
let isCodeGenOnly = 1;
let DecoderMethod = "DecodeMem";
}
@@ -701,97 +740,91 @@ class EffectiveAddress<string opstr, RegisterClass RC, Operand Mem> :
// Count Leading Ones/Zeros in Word
class CountLeading0<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz RO:$rs))], IIAlu, FrmR>,
+ [(set RO:$rd, (ctlz RO:$rs))], IIArith, FrmR, opstr>,
Requires<[HasBitCount, HasStdEnc]>;
class CountLeading1<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rs), !strconcat(opstr, "\t$rd, $rs"),
- [(set RO:$rd, (ctlz (not RO:$rs)))], IIAlu, FrmR>,
+ [(set RO:$rd, (ctlz (not RO:$rs)))], IIArith, FrmR, opstr>,
Requires<[HasBitCount, HasStdEnc]>;
// Sign Extend in Register.
-class SignExtInReg<string opstr, ValueType vt, RegisterClass RC> :
- InstSE<(outs RC:$rd), (ins RC:$rt), !strconcat(opstr, "\t$rd, $rt"),
- [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary, FrmR> {
+class SignExtInReg<string opstr, ValueType vt, RegisterOperand RO> :
+ InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"),
+ [(set RO:$rd, (sext_inreg RO:$rt, vt))], IIseb, FrmR, opstr> {
let Predicates = [HasSEInReg, HasStdEnc];
}
// Subword Swap
class SubwordSwap<string opstr, RegisterOperand RO>:
InstSE<(outs RO:$rd), (ins RO:$rt), !strconcat(opstr, "\t$rd, $rt"), [],
- NoItinerary, FrmR> {
+ NoItinerary, FrmR, opstr> {
let Predicates = [HasSwap, HasStdEnc];
let neverHasSideEffects = 1;
}
// Read Hardware
-class ReadHardware<RegisterClass CPURegClass, RegisterOperand RO> :
- InstSE<(outs CPURegClass:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [],
- IIAlu, FrmR>;
+class ReadHardware<RegisterOperand CPURegOperand, RegisterOperand RO> :
+ InstSE<(outs CPURegOperand:$rt), (ins RO:$rd), "rdhwr\t$rt, $rd", [],
+ IIArith, FrmR>;
// Ext and Ins
-class ExtBase<string opstr, RegisterOperand RO>:
- InstSE<(outs RO:$rt), (ins RO:$rs, uimm16:$pos, size_ext:$size),
+class ExtBase<string opstr, RegisterOperand RO, Operand PosOpnd,
+ SDPatternOperator Op = null_frag>:
+ InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ext:$size),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
- [(set RO:$rt, (MipsExt RO:$rs, imm:$pos, imm:$size))], NoItinerary,
- FrmR> {
+ [(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size))], NoItinerary,
+ FrmR, opstr> {
let Predicates = [HasMips32r2, HasStdEnc];
}
-class InsBase<string opstr, RegisterOperand RO>:
- InstSE<(outs RO:$rt), (ins RO:$rs, uimm16:$pos, size_ins:$size, RO:$src),
+class InsBase<string opstr, RegisterOperand RO, Operand PosOpnd,
+ SDPatternOperator Op = null_frag>:
+ InstSE<(outs RO:$rt), (ins RO:$rs, PosOpnd:$pos, size_ins:$size, RO:$src),
!strconcat(opstr, " $rt, $rs, $pos, $size"),
- [(set RO:$rt, (MipsIns RO:$rs, imm:$pos, imm:$size, RO:$src))],
- NoItinerary, FrmR> {
+ [(set RO:$rt, (Op RO:$rs, imm:$pos, imm:$size, RO:$src))],
+ NoItinerary, FrmR, opstr> {
let Predicates = [HasMips32r2, HasStdEnc];
let Constraints = "$src = $rt";
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
-class Atomic2Ops<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
- PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
-
-multiclass Atomic2Ops32<PatFrag Op> {
- def NAME : Atomic2Ops<Op, CPURegs, CPURegs>, Requires<[NotN64, HasStdEnc]>;
- def _P8 : Atomic2Ops<Op, CPURegs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- }
-}
+class Atomic2Ops<PatFrag Op, RegisterClass DRC> :
+ PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$incr),
+ [(set DRC:$dst, (Op iPTR:$ptr, DRC:$incr))]>;
// Atomic Compare & Swap.
-class AtomicCmpSwap<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
- PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
- [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
-
-multiclass AtomicCmpSwap32<PatFrag Op> {
- def NAME : AtomicCmpSwap<Op, CPURegs, CPURegs>,
- Requires<[NotN64, HasStdEnc]>;
- def _P8 : AtomicCmpSwap<Op, CPURegs, CPU64Regs>,
- Requires<[IsN64, HasStdEnc]> {
- let DecoderNamespace = "Mips64";
- }
-}
+class AtomicCmpSwap<PatFrag Op, RegisterClass DRC> :
+ PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$cmp, DRC:$swap),
+ [(set DRC:$dst, (Op iPTR:$ptr, DRC:$cmp, DRC:$swap))]>;
-class LLBase<string opstr, RegisterOperand RO, Operand Mem> :
- InstSE<(outs RO:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
+class LLBase<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayLoad = 1;
}
-class SCBase<string opstr, RegisterOperand RO, Operand Mem> :
- InstSE<(outs RO:$dst), (ins RO:$rt, Mem:$addr),
+class SCBase<string opstr, RegisterOperand RO> :
+ InstSE<(outs RO:$dst), (ins RO:$rt, mem:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
let Constraints = "$rt = $dst";
}
-class MFC3OP<dag outs, dag ins, string asmstr> :
- InstSE<outs, ins, asmstr, [], NoItinerary, FrmFR>;
+class MFC3OP<string asmstr, RegisterOperand RO> :
+ InstSE<(outs RO:$rt, RO:$rd, uimm16:$sel), (ins),
+ !strconcat(asmstr, "\t$rt, $rd, $sel"), [], NoItinerary, FrmFR>;
+
+class TrapBase<Instruction RealInst>
+ : PseudoSE<(outs), (ins), [(trap)], NoItinerary>,
+ PseudoInstExpansion<(RealInst 0, 0)> {
+ let isBarrier = 1;
+ let isTerminator = 1;
+ let isCodeGenOnly = 1;
+}
//===----------------------------------------------------------------------===//
// Pseudo instructions
@@ -809,38 +842,38 @@ def ADJCALLSTACKUP : MipsPseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
}
let usesCustomInserter = 1 in {
- defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8>;
- defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16>;
- defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32>;
- defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8>;
- defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16>;
- defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32>;
- defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8>;
- defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16>;
- defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32>;
- defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8>;
- defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16>;
- defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32>;
- defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8>;
- defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16>;
- defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32>;
- defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8>;
- defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16>;
- defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32>;
-
- defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8>;
- defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16>;
- defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32>;
-
- defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8>;
- defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16>;
- defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32>;
+ def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, GPR32>;
+ def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, GPR32>;
+ def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, GPR32>;
+ def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, GPR32>;
+ def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, GPR32>;
+ def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, GPR32>;
+ def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, GPR32>;
+ def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, GPR32>;
+ def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, GPR32>;
+ def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, GPR32>;
+ def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, GPR32>;
+ def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, GPR32>;
+ def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, GPR32>;
+ def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, GPR32>;
+ def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, GPR32>;
+ def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, GPR32>;
+ def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, GPR32>;
+ def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, GPR32>;
+
+ def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, GPR32>;
+ def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, GPR32>;
+ def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, GPR32>;
+
+ def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, GPR32>;
+ def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, GPR32>;
+ def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, GPR32>;
}
/// Pseudo instructions for loading and storing accumulator registers.
-let isPseudo = 1 in {
- defm LOAD_AC64 : LoadM<"load_ac64", ACRegs>;
- defm STORE_AC64 : StoreM<"store_ac64", ACRegs>;
+let isPseudo = 1, isCodeGenOnly = 1 in {
+ def LOAD_ACC64 : Load<"", ACC64>;
+ def STORE_ACC64 : Store<"", ACC64>;
}
//===----------------------------------------------------------------------===//
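
The Atomic2Ops and AtomicCmpSwap pseudos above stay under usesCustomInserter = 1, so TableGen never expands them; the target's custom-inserter hook rewrites them after instruction selection (typically into an ll/sc retry loop). A small sketch of recognizing such pseudos by opcode; isCustomInsertedAtomic is a hypothetical helper, the listed opcodes come from the definitions above, and the assumption is that the TableGen'd Mips::* opcode enum is visible through MipsInstrInfo.h:

#include "MipsInstrInfo.h"              // for the Mips::* opcode enum
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// True for pseudos that the custom-inserter hook expands after selection.
static bool isCustomInsertedAtomic(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  case Mips::ATOMIC_LOAD_ADD_I32:
  case Mips::ATOMIC_CMP_SWAP_I32:
    return true;                        // becomes an ll/sc retry loop
  default:
    return false;
  }
}
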
@@ -851,113 +884,146 @@ let isPseudo = 1 in {
//===----------------------------------------------------------------------===//
/// Arithmetic Instructions (ALU Immediate)
-def ADDiu : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd, immSExt16, add>,
+def ADDiu : MMRel, ArithLogicI<"addiu", simm16, GPR32Opnd, IIArith, immSExt16,
+ add>,
ADDI_FM<0x9>, IsAsCheapAsAMove;
-def ADDi : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>, ADDI_FM<0x8>;
-def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
+def ADDi : MMRel, ArithLogicI<"addi", simm16, GPR32Opnd>, ADDI_FM<0x8>;
+def SLTi : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xa>;
-def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
+def SLTiu : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, GPR32Opnd>,
SLTI_FM<0xb>;
-def ANDi : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16, and>,
+def ANDi : MMRel, ArithLogicI<"andi", uimm16, GPR32Opnd, IILogic, immZExt16,
+ and>,
ADDI_FM<0xc>;
-def ORi : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
+def ORi : MMRel, ArithLogicI<"ori", uimm16, GPR32Opnd, IILogic, immZExt16,
+ or>,
ADDI_FM<0xd>;
-def XORi : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16, xor>,
+def XORi : MMRel, ArithLogicI<"xori", uimm16, GPR32Opnd, IILogic, immZExt16,
+ xor>,
ADDI_FM<0xe>;
-def LUi : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM;
+def LUi : MMRel, LoadUpper<"lui", GPR32Opnd, uimm16>, LUI_FM;
/// Arithmetic Instructions (3-Operand, R-Type)
-def ADDu : MMRel, ArithLogicR<"addu", CPURegsOpnd, 1, IIAlu, add>,
+def ADDu : MMRel, ArithLogicR<"addu", GPR32Opnd, 1, IIArith, add>,
ADD_FM<0, 0x21>;
-def SUBu : MMRel, ArithLogicR<"subu", CPURegsOpnd, 0, IIAlu, sub>,
+def SUBu : MMRel, ArithLogicR<"subu", GPR32Opnd, 0, IIArith, sub>,
ADD_FM<0, 0x23>;
-def MUL : MMRel, ArithLogicR<"mul", CPURegsOpnd, 1, IIImul, mul>,
+let Defs = [HI0, LO0] in
+def MUL : MMRel, ArithLogicR<"mul", GPR32Opnd, 1, IIImul, mul>,
ADD_FM<0x1c, 2>;
-def ADD : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM<0, 0x20>;
-def SUB : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM<0, 0x22>;
-def SLT : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM<0, 0x2a>;
-def SLTu : MMRel, SetCC_R<"sltu", setult, CPURegs>, ADD_FM<0, 0x2b>;
-def AND : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
+def ADD : MMRel, ArithLogicR<"add", GPR32Opnd>, ADD_FM<0, 0x20>;
+def SUB : MMRel, ArithLogicR<"sub", GPR32Opnd>, ADD_FM<0, 0x22>;
+def SLT : MMRel, SetCC_R<"slt", setlt, GPR32Opnd>, ADD_FM<0, 0x2a>;
+def SLTu : MMRel, SetCC_R<"sltu", setult, GPR32Opnd>, ADD_FM<0, 0x2b>;
+def AND : MMRel, ArithLogicR<"and", GPR32Opnd, 1, IILogic, and>,
ADD_FM<0, 0x24>;
-def OR : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
+def OR : MMRel, ArithLogicR<"or", GPR32Opnd, 1, IILogic, or>,
ADD_FM<0, 0x25>;
-def XOR : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
+def XOR : MMRel, ArithLogicR<"xor", GPR32Opnd, 1, IILogic, xor>,
ADD_FM<0, 0x26>;
-def NOR : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM<0, 0x27>;
+def NOR : MMRel, LogicNOR<"nor", GPR32Opnd>, ADD_FM<0, 0x27>;
/// Shift Instructions
-def SLL : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd, shl, immZExt5>,
+def SLL : MMRel, shift_rotate_imm<"sll", uimm5, GPR32Opnd, shl, immZExt5>,
SRA_FM<0, 0>;
-def SRL : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd, srl, immZExt5>,
+def SRL : MMRel, shift_rotate_imm<"srl", uimm5, GPR32Opnd, srl, immZExt5>,
SRA_FM<2, 0>;
-def SRA : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd, sra, immZExt5>,
+def SRA : MMRel, shift_rotate_imm<"sra", uimm5, GPR32Opnd, sra, immZExt5>,
SRA_FM<3, 0>;
-def SLLV : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd, shl>, SRLV_FM<4, 0>;
-def SRLV : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd, srl>, SRLV_FM<6, 0>;
-def SRAV : MMRel, shift_rotate_reg<"srav", CPURegsOpnd, sra>, SRLV_FM<7, 0>;
+def SLLV : MMRel, shift_rotate_reg<"sllv", GPR32Opnd, shl>, SRLV_FM<4, 0>;
+def SRLV : MMRel, shift_rotate_reg<"srlv", GPR32Opnd, srl>, SRLV_FM<6, 0>;
+def SRAV : MMRel, shift_rotate_reg<"srav", GPR32Opnd, sra>, SRLV_FM<7, 0>;
// Rotate Instructions
let Predicates = [HasMips32r2, HasStdEnc] in {
- def ROTR : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd, rotr,
+ def ROTR : MMRel, shift_rotate_imm<"rotr", uimm5, GPR32Opnd, rotr,
immZExt5>,
SRA_FM<2, 1>;
- def ROTRV : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd, rotr>,
+ def ROTRV : MMRel, shift_rotate_reg<"rotrv", GPR32Opnd, rotr>,
SRLV_FM<6, 1>;
}
/// Load and Store Instructions
/// aligned
-defm LB : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM<0x20>;
-defm LBu : LoadM<"lbu", CPURegs, zextloadi8, addrDefault>, MMRel, LW_FM<0x24>;
-defm LH : LoadM<"lh", CPURegs, sextloadi16, addrDefault>, MMRel, LW_FM<0x21>;
-defm LHu : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM<0x25>;
-defm LW : LoadM<"lw", CPURegs, load, addrDefault>, MMRel, LW_FM<0x23>;
-defm SB : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM<0x28>;
-defm SH : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM<0x29>;
-defm SW : StoreM<"sw", CPURegs, store>, MMRel, LW_FM<0x2b>;
+def LB : Load<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>;
+def LBu : Load<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel,
+ LW_FM<0x24>;
+def LH : Load<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel,
+ LW_FM<0x21>;
+def LHu : Load<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>;
+def LW : Load<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel,
+ LW_FM<0x23>;
+def SB : Store<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>;
+def SH : Store<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>;
+def SW : Store<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>;
/// load/store left/right
-defm LWL : LoadLeftRightM<"lwl", MipsLWL, CPURegs>, LW_FM<0x22>;
-defm LWR : LoadLeftRightM<"lwr", MipsLWR, CPURegs>, LW_FM<0x26>;
-defm SWL : StoreLeftRightM<"swl", MipsSWL, CPURegs>, LW_FM<0x2a>;
-defm SWR : StoreLeftRightM<"swr", MipsSWR, CPURegs>, LW_FM<0x2e>;
+let Predicates = [NotInMicroMips] in {
+def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd, IILoad>, LW_FM<0x22>;
+def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd, IILoad>, LW_FM<0x26>;
+def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd, IIStore>, LW_FM<0x2a>;
+def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd, IIStore>, LW_FM<0x2e>;
+}
def SYNC : SYNC_FT, SYNC_FM;
+def TEQ : MMRel, TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>;
+def TGE : MMRel, TEQ_FT<"tge", GPR32Opnd>, TEQ_FM<0x30>;
+def TGEU : MMRel, TEQ_FT<"tgeu", GPR32Opnd>, TEQ_FM<0x31>;
+def TLT : MMRel, TEQ_FT<"tlt", GPR32Opnd>, TEQ_FM<0x32>;
+def TLTU : MMRel, TEQ_FT<"tltu", GPR32Opnd>, TEQ_FM<0x33>;
+def TNE : MMRel, TEQ_FT<"tne", GPR32Opnd>, TEQ_FM<0x36>;
-/// Load-linked, Store-conditional
-let Predicates = [NotN64, HasStdEnc] in {
- def LL : LLBase<"ll", CPURegsOpnd, mem>, LW_FM<0x30>;
- def SC : SCBase<"sc", CPURegsOpnd, mem>, LW_FM<0x38>;
-}
+def TEQI : MMRel, TEQI_FT<"teqi", GPR32Opnd>, TEQI_FM<0xc>;
+def TGEI : MMRel, TEQI_FT<"tgei", GPR32Opnd>, TEQI_FM<0x8>;
+def TGEIU : MMRel, TEQI_FT<"tgeiu", GPR32Opnd>, TEQI_FM<0x9>;
+def TLTI : MMRel, TEQI_FT<"tlti", GPR32Opnd>, TEQI_FM<0xa>;
+def TTLTIU : MMRel, TEQI_FT<"tltiu", GPR32Opnd>, TEQI_FM<0xb>;
+def TNEI : MMRel, TEQI_FT<"tnei", GPR32Opnd>, TEQI_FM<0xe>;
-let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
- def LL_P8 : LLBase<"ll", CPURegsOpnd, mem64>, LW_FM<0x30>;
- def SC_P8 : SCBase<"sc", CPURegsOpnd, mem64>, LW_FM<0x38>;
-}
+def BREAK : BRK_FT<"break">, BRK_FM<0xd>;
+def SYSCALL : SYS_FT<"syscall">, SYS_FM<0xc>;
+def TRAP : TrapBase<BREAK>;
+
+def ERET : ER_FT<"eret">, ER_FM<0x18>;
+def DERET : ER_FT<"deret">, ER_FM<0x1f>;
+
+def EI : DEI_FT<"ei", GPR32Opnd>, EI_FM<1>;
+def DI : DEI_FT<"di", GPR32Opnd>, EI_FM<0>;
+
+def WAIT : WAIT_FT<"wait">;
+
+/// Load-linked, Store-conditional
+def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>;
+def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>;
/// Jump and Branch Instructions
-def J : JumpFJ<jmptarget, "j", br, bb>, FJ<2>,
+def J : MMRel, JumpFJ<jmptarget, "j", br, bb, "j">, FJ<2>,
Requires<[RelocStatic, HasStdEnc]>, IsBranch;
-def JR : IndirectBranch<CPURegs>, MTLO_FM<8>;
-def B : UncondBranch<"b">, B_FM;
-def BEQ : CBranch<"beq", seteq, CPURegs>, BEQ_FM<4>;
-def BNE : CBranch<"bne", setne, CPURegs>, BEQ_FM<5>;
-def BGEZ : CBranchZero<"bgez", setge, CPURegs>, BGEZ_FM<1, 1>;
-def BGTZ : CBranchZero<"bgtz", setgt, CPURegs>, BGEZ_FM<7, 0>;
-def BLEZ : CBranchZero<"blez", setle, CPURegs>, BGEZ_FM<6, 0>;
-def BLTZ : CBranchZero<"bltz", setlt, CPURegs>, BGEZ_FM<1, 0>;
-
-def BAL_BR: BAL_FT, BAL_FM;
-
-def JAL : JumpLink<"jal">, FJ<3>;
-def JALR : JumpLinkReg<"jalr", CPURegs>, JALR_FM;
-def JALRPseudo : JumpLinkRegPseudo<CPURegs, JALR, RA>;
-def BGEZAL : BGEZAL_FT<"bgezal", CPURegsOpnd>, BGEZAL_FM<0x11>;
-def BLTZAL : BGEZAL_FT<"bltzal", CPURegsOpnd>, BGEZAL_FM<0x10>;
-def TAILCALL : JumpFJ<calltarget, "j", MipsTailCall, imm>, FJ<2>, IsTailCall;
-def TAILCALL_R : JumpFR<CPURegs, MipsTailCall>, MTLO_FM<8>, IsTailCall;
-
-def RET : RetBase<CPURegs>, MTLO_FM<8>;
+def JR : MMRel, IndirectBranch<"jr", GPR32Opnd>, MTLO_FM<8>;
+def BEQ : MMRel, CBranch<"beq", brtarget, seteq, GPR32Opnd>, BEQ_FM<4>;
+def BNE : MMRel, CBranch<"bne", brtarget, setne, GPR32Opnd>, BEQ_FM<5>;
+def BGEZ : MMRel, CBranchZero<"bgez", brtarget, setge, GPR32Opnd>,
+ BGEZ_FM<1, 1>;
+def BGTZ : MMRel, CBranchZero<"bgtz", brtarget, setgt, GPR32Opnd>,
+ BGEZ_FM<7, 0>;
+def BLEZ : MMRel, CBranchZero<"blez", brtarget, setle, GPR32Opnd>,
+ BGEZ_FM<6, 0>;
+def BLTZ : MMRel, CBranchZero<"bltz", brtarget, setlt, GPR32Opnd>,
+ BGEZ_FM<1, 0>;
+def B : UncondBranch<BEQ>;
+
+def JAL : MMRel, JumpLink<"jal", calltarget>, FJ<3>;
+def JALR : MMRel, JumpLinkReg<"jalr", GPR32Opnd>, JALR_FM;
+def JALRPseudo : JumpLinkRegPseudo<GPR32Opnd, JALR, RA>;
+def BGEZAL : MMRel, BGEZAL_FT<"bgezal", brtarget, GPR32Opnd>, BGEZAL_FM<0x11>;
+def BLTZAL : MMRel, BGEZAL_FT<"bltzal", brtarget, GPR32Opnd>, BGEZAL_FM<0x10>;
+def BAL_BR : BAL_BR_Pseudo<BGEZAL>;
+def TAILCALL : MMRel, JumpFJ<calltarget, "j", MipsTailCall, imm, "tcall">,
+ FJ<2>, IsTailCall;
+def TAILCALL_R : MMRel, JumpFR<"tcallr", GPR32Opnd, MipsTailCall>, MTLO_FM<8>,
+ IsTailCall;
+
+def RET : MMRel, RetBase<"ret", GPR32Opnd>, MTLO_FM<8>;
// Exception handling related node and instructions.
// The conversion sequence is:
@@ -973,41 +1039,38 @@ def MIPSehret : SDNode<"MipsISD::EH_RETURN", SDT_MipsEHRET,
[SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
let Uses = [V0, V1], isTerminator = 1, isReturn = 1, isBarrier = 1 in {
- def MIPSeh_return32 : MipsPseudo<(outs), (ins CPURegs:$spoff, CPURegs:$dst),
- [(MIPSehret CPURegs:$spoff, CPURegs:$dst)]>;
- def MIPSeh_return64 : MipsPseudo<(outs), (ins CPU64Regs:$spoff,
- CPU64Regs:$dst),
- [(MIPSehret CPU64Regs:$spoff, CPU64Regs:$dst)]>;
+ def MIPSeh_return32 : MipsPseudo<(outs), (ins GPR32:$spoff, GPR32:$dst),
+ [(MIPSehret GPR32:$spoff, GPR32:$dst)]>;
+ def MIPSeh_return64 : MipsPseudo<(outs), (ins GPR64:$spoff,
+ GPR64:$dst),
+ [(MIPSehret GPR64:$spoff, GPR64:$dst)]>;
}
/// Multiply and Divide Instructions.
-def MULT : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
+def MULT : MMRel, Mult<"mult", IIImult, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x18>;
-def MULTu : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
+def MULTu : MMRel, Mult<"multu", IIImult, GPR32Opnd, [HI0, LO0]>,
MULT_FM<0, 0x19>;
-def PseudoMULT : MultDivPseudo<MULT, ACRegs, CPURegsOpnd, MipsMult, IIImul>;
-def PseudoMULTu : MultDivPseudo<MULTu, ACRegs, CPURegsOpnd, MipsMultu, IIImul>;
-def SDIV : Div<"div", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1a>;
-def UDIV : Div<"divu", IIIdiv, CPURegsOpnd, [HI, LO]>, MULT_FM<0, 0x1b>;
-def PseudoSDIV : MultDivPseudo<SDIV, ACRegs, CPURegsOpnd, MipsDivRem, IIIdiv, 0>;
-def PseudoUDIV : MultDivPseudo<UDIV, ACRegs, CPURegsOpnd, MipsDivRemU, IIIdiv,
- 0>;
-
-def MTHI : MoveToLOHI<"mthi", CPURegs, [HI]>, MTLO_FM<0x11>;
-def MTLO : MoveToLOHI<"mtlo", CPURegs, [LO]>, MTLO_FM<0x13>;
-def MFHI : MoveFromLOHI<"mfhi", CPURegs, [HI]>, MFLO_FM<0x10>;
-def MFLO : MoveFromLOHI<"mflo", CPURegs, [LO]>, MFLO_FM<0x12>;
+def SDIV : MMRel, Div<"div", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1a>;
+def UDIV : MMRel, Div<"divu", IIIdiv, GPR32Opnd, [HI0, LO0]>,
+ MULT_FM<0, 0x1b>;
+
+def MTHI : MMRel, MoveToLOHI<"mthi", GPR32Opnd, [HI0]>, MTLO_FM<0x11>;
+def MTLO : MMRel, MoveToLOHI<"mtlo", GPR32Opnd, [LO0]>, MTLO_FM<0x13>;
+def MFHI : MMRel, MoveFromLOHI<"mfhi", GPR32Opnd, AC0>, MFLO_FM<0x10>;
+def MFLO : MMRel, MoveFromLOHI<"mflo", GPR32Opnd, AC0>, MFLO_FM<0x12>;
/// Sign Ext In Register Instructions.
-def SEB : SignExtInReg<"seb", i8, CPURegs>, SEB_FM<0x10, 0x20>;
-def SEH : SignExtInReg<"seh", i16, CPURegs>, SEB_FM<0x18, 0x20>;
+def SEB : MMRel, SignExtInReg<"seb", i8, GPR32Opnd>, SEB_FM<0x10, 0x20>;
+def SEH : MMRel, SignExtInReg<"seh", i16, GPR32Opnd>, SEB_FM<0x18, 0x20>;
/// Count Leading
-def CLZ : CountLeading0<"clz", CPURegsOpnd>, CLO_FM<0x20>;
-def CLO : CountLeading1<"clo", CPURegsOpnd>, CLO_FM<0x21>;
+def CLZ : MMRel, CountLeading0<"clz", GPR32Opnd>, CLO_FM<0x20>;
+def CLO : MMRel, CountLeading1<"clo", GPR32Opnd>, CLO_FM<0x21>;
/// Word Swap Bytes Within Halfwords
-def WSBH : SubwordSwap<"wsbh", CPURegsOpnd>, SEB_FM<2, 0x20>;
+def WSBH : MMRel, SubwordSwap<"wsbh", GPR32Opnd>, SEB_FM<2, 0x20>;
/// No operation.
def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
@@ -1016,85 +1079,98 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
// instructions. The same not happens for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
-def LEA_ADDiu : EffectiveAddress<"addiu", CPURegs, mem_ea>, LW_FM<9>;
+def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd>, LW_FM<9>;
// MADD*/MSUB*
-def MADD : MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
-def MADDU : MArithR<"maddu", 1>, MULT_FM<0x1c, 1>;
-def MSUB : MArithR<"msub">, MULT_FM<0x1c, 4>;
-def MSUBU : MArithR<"msubu">, MULT_FM<0x1c, 5>;
+def MADD : MMRel, MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
+def MADDU : MMRel, MArithR<"maddu", 1>, MULT_FM<0x1c, 1>;
+def MSUB : MMRel, MArithR<"msub">, MULT_FM<0x1c, 4>;
+def MSUBU : MMRel, MArithR<"msubu">, MULT_FM<0x1c, 5>;
+
+let Predicates = [HasStdEnc, NotDSP] in {
+def PseudoMULT : MultDivPseudo<MULT, ACC64, GPR32Opnd, MipsMult, IIImult>;
+def PseudoMULTu : MultDivPseudo<MULTu, ACC64, GPR32Opnd, MipsMultu, IIImult>;
+def PseudoMFHI : PseudoMFLOHI<GPR32, ACC64, MipsMFHI>;
+def PseudoMFLO : PseudoMFLOHI<GPR32, ACC64, MipsMFLO>;
+def PseudoMTLOHI : PseudoMTLOHI<ACC64, GPR32>;
def PseudoMADD : MAddSubPseudo<MADD, MipsMAdd>;
def PseudoMADDU : MAddSubPseudo<MADDU, MipsMAddu>;
def PseudoMSUB : MAddSubPseudo<MSUB, MipsMSub>;
def PseudoMSUBU : MAddSubPseudo<MSUBU, MipsMSubu>;
+}
-def RDHWR : ReadHardware<CPURegs, HWRegsOpnd>, RDHWR_FM;
-
-def EXT : ExtBase<"ext", CPURegsOpnd>, EXT_FM<0>;
-def INS : InsBase<"ins", CPURegsOpnd>, EXT_FM<4>;
-
-/// Move Control Registers From/To CPU Registers
-def MFC0_3OP : MFC3OP<(outs CPURegsOpnd:$rt),
- (ins CPURegsOpnd:$rd, uimm16:$sel),
- "mfc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 0>;
+def PseudoSDIV : MultDivPseudo<SDIV, ACC64, GPR32Opnd, MipsDivRem, IIIdiv,
+ 0, 1, 1>;
+def PseudoUDIV : MultDivPseudo<UDIV, ACC64, GPR32Opnd, MipsDivRemU, IIIdiv,
+ 0, 1, 1>;
-def MTC0_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel),
- (ins CPURegsOpnd:$rt),
- "mtc0\t$rt, $rd, $sel">, MFC3OP_FM<0x10, 4>;
+def RDHWR : ReadHardware<GPR32Opnd, HWRegsOpnd>, RDHWR_FM;
-def MFC2_3OP : MFC3OP<(outs CPURegsOpnd:$rt),
- (ins CPURegsOpnd:$rd, uimm16:$sel),
- "mfc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 0>;
+def EXT : MMRel, ExtBase<"ext", GPR32Opnd, uimm5, MipsExt>, EXT_FM<0>;
+def INS : MMRel, InsBase<"ins", GPR32Opnd, uimm5, MipsIns>, EXT_FM<4>;
-def MTC2_3OP : MFC3OP<(outs CPURegsOpnd:$rd, uimm16:$sel),
- (ins CPURegsOpnd:$rt),
- "mtc2\t$rt, $rd, $sel">, MFC3OP_FM<0x12, 4>;
+/// Move Control Registers From/To CPU Registers
+def MFC0 : MFC3OP<"mfc0", GPR32Opnd>, MFC3OP_FM<0x10, 0>;
+def MTC0 : MFC3OP<"mtc0", GPR32Opnd>, MFC3OP_FM<0x10, 4>;
+def MFC2 : MFC3OP<"mfc2", GPR32Opnd>, MFC3OP_FM<0x12, 0>;
+def MTC2 : MFC3OP<"mtc2", GPR32Opnd>, MFC3OP_FM<0x12, 4>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//
def : InstAlias<"move $dst, $src",
- (ADDu CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>,
- Requires<[NotMips64]>;
-def : InstAlias<"move $dst, $src",
- (OR CPURegsOpnd:$dst, CPURegsOpnd:$src,ZERO), 1>,
+ (ADDu GPR32Opnd:$dst, GPR32Opnd:$src,ZERO), 1>,
Requires<[NotMips64]>;
-def : InstAlias<"bal $offset", (BGEZAL RA, brtarget:$offset), 1>;
+def : InstAlias<"bal $offset", (BGEZAL ZERO, brtarget:$offset), 0>;
def : InstAlias<"addu $rs, $rt, $imm",
- (ADDiu CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
+ (ADDiu GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"add $rs, $rt, $imm",
- (ADDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
+ (ADDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"and $rs, $rt, $imm",
- (ANDi CPURegsOpnd:$rs, CPURegsOpnd:$rt, simm16:$imm), 0>;
-def : InstAlias<"j $rs", (JR CPURegs:$rs), 0>,
- Requires<[NotMips64]>;
-def : InstAlias<"jalr $rs", (JALR RA, CPURegs:$rs)>, Requires<[NotMips64]>;
-def : InstAlias<"jal $rs", (JALR RA, CPURegs:$rs), 0>, Requires<[NotMips64]>;
-def : InstAlias<"jal $rd,$rs", (JALR CPURegs:$rd, CPURegs:$rs), 0>,
- Requires<[NotMips64]>;
+ (ANDi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
+def : InstAlias<"j $rs", (JR GPR32Opnd:$rs), 0>;
+def : InstAlias<"jalr $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+def : InstAlias<"jal $rs", (JALR RA, GPR32Opnd:$rs), 0>;
+def : InstAlias<"jal $rd,$rs", (JALR GPR32Opnd:$rd, GPR32Opnd:$rs), 0>;
def : InstAlias<"not $rt, $rs",
- (NOR CPURegsOpnd:$rt, CPURegsOpnd:$rs, ZERO), 1>;
+ (NOR GPR32Opnd:$rt, GPR32Opnd:$rs, ZERO), 0>;
def : InstAlias<"neg $rt, $rs",
- (SUB CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>;
+ (SUB GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
def : InstAlias<"negu $rt, $rs",
- (SUBu CPURegsOpnd:$rt, ZERO, CPURegsOpnd:$rs), 1>;
+ (SUBu GPR32Opnd:$rt, ZERO, GPR32Opnd:$rs), 1>;
def : InstAlias<"slt $rs, $rt, $imm",
- (SLTi CPURegsOpnd:$rs, CPURegs:$rt, simm16:$imm), 0>;
+ (SLTi GPR32Opnd:$rs, GPR32Opnd:$rt, simm16:$imm), 0>;
def : InstAlias<"xor $rs, $rt, $imm",
- (XORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>,
- Requires<[NotMips64]>;
+ (XORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
def : InstAlias<"or $rs, $rt, $imm",
- (ORi CPURegsOpnd:$rs, CPURegsOpnd:$rt, uimm16:$imm), 1>,
- Requires<[NotMips64]>;
+ (ORi GPR32Opnd:$rs, GPR32Opnd:$rt, uimm16:$imm), 0>;
def : InstAlias<"nop", (SLL ZERO, ZERO, 0), 1>;
-def : InstAlias<"mfc0 $rt, $rd",
- (MFC0_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>;
-def : InstAlias<"mtc0 $rt, $rd",
- (MTC0_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>;
-def : InstAlias<"mfc2 $rt, $rd",
- (MFC2_3OP CPURegsOpnd:$rt, CPURegsOpnd:$rd, 0), 0>;
-def : InstAlias<"mtc2 $rt, $rd",
- (MTC2_3OP CPURegsOpnd:$rd, 0, CPURegsOpnd:$rt), 0>;
+def : InstAlias<"mfc0 $rt, $rd", (MFC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mtc0 $rt, $rd", (MTC0 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mfc2 $rt, $rd", (MFC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"mtc2 $rt, $rd", (MTC2 GPR32Opnd:$rt, GPR32Opnd:$rd, 0), 0>;
+def : InstAlias<"b $offset", (BEQ ZERO, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"bnez $rs,$offset",
+ (BNE GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"beqz $rs,$offset",
+ (BEQ GPR32Opnd:$rs, ZERO, brtarget:$offset), 0>;
+def : InstAlias<"syscall", (SYSCALL 0), 1>;
+
+def : InstAlias<"break $imm", (BREAK uimm10:$imm, 0), 1>;
+def : InstAlias<"break", (BREAK 0, 0), 1>;
+def : InstAlias<"ei", (EI ZERO), 1>;
+def : InstAlias<"di", (DI ZERO), 1>;
+
+def : InstAlias<"teq $rs, $rt", (TEQ GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tge $rs, $rt", (TGE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tgeu $rs, $rt", (TGEU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tlt $rs, $rt", (TLT GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tltu $rs, $rt", (TLTU GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"tne $rs, $rt", (TNE GPR32Opnd:$rs, GPR32Opnd:$rt, 0), 1>;
+def : InstAlias<"sub, $rd, $rs, $imm",
+ (ADDi GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
+def : InstAlias<"subu, $rd, $rs, $imm",
+ (ADDiu GPR32Opnd:$rd, GPR32Opnd:$rs, InvertedImOperand:$imm)>;
//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions
@@ -1103,19 +1179,17 @@ def : InstAlias<"mtc2 $rt, $rd",
class LoadImm32< string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadImm32Reg : LoadImm32<"li", shamt,CPURegsOpnd>;
+def LoadImm32Reg : LoadImm32<"li", uimm5, GPR32Opnd>;
class LoadAddress<string instr_asm, Operand MemOpnd, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins MemOpnd:$addr),
!strconcat(instr_asm, "\t$rt, $addr")> ;
-def LoadAddr32Reg : LoadAddress<"la", mem, CPURegsOpnd>;
+def LoadAddr32Reg : LoadAddress<"la", mem, GPR32Opnd>;
class LoadAddressImm<string instr_asm, Operand Od, RegisterOperand RO> :
MipsAsmPseudoInst<(outs RO:$rt), (ins Od:$imm32),
!strconcat(instr_asm, "\t$rt, $imm32")> ;
-def LoadAddr32Imm : LoadAddressImm<"la", shamt,CPURegsOpnd>;
-
-
+def LoadAddr32Imm : LoadAddressImm<"la", uimm5, GPR32Opnd>;
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
@@ -1141,13 +1215,13 @@ def : MipsPat<(i32 imm:$imm),
(ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
// Carry MipsPatterns
-def : MipsPat<(subc CPURegs:$lhs, CPURegs:$rhs),
- (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
+def : MipsPat<(subc GPR32:$lhs, GPR32:$rhs),
+ (SUBu GPR32:$lhs, GPR32:$rhs)>;
let Predicates = [HasStdEnc, NotDSP] in {
- def : MipsPat<(addc CPURegs:$lhs, CPURegs:$rhs),
- (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
- def : MipsPat<(addc CPURegs:$src, immSExt16:$imm),
- (ADDiu CPURegs:$src, imm:$imm)>;
+ def : MipsPat<(addc GPR32:$lhs, GPR32:$rhs),
+ (ADDu GPR32:$lhs, GPR32:$rhs)>;
+ def : MipsPat<(addc GPR32:$src, immSExt16:$imm),
+ (ADDiu GPR32:$src, imm:$imm)>;
}
// Call
@@ -1155,8 +1229,8 @@ def : MipsPat<(MipsJmpLink (i32 tglobaladdr:$dst)),
(JAL tglobaladdr:$dst)>;
def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)),
(JAL texternalsym:$dst)>;
-//def : MipsPat<(MipsJmpLink CPURegs:$dst),
-// (JALR CPURegs:$dst)>;
+//def : MipsPat<(MipsJmpLink GPR32:$dst),
+// (JALR GPR32:$dst)>;
// Tail call
def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)),
@@ -1178,58 +1252,49 @@ def : MipsPat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>;
def : MipsPat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>;
def : MipsPat<(MipsLo texternalsym:$in), (ADDiu ZERO, texternalsym:$in)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)),
- (ADDiu CPURegs:$hi, tglobaladdr:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)),
- (ADDiu CPURegs:$hi, tblockaddress:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
- (ADDiu CPURegs:$hi, tjumptable:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)),
- (ADDiu CPURegs:$hi, tconstpool:$lo)>;
-def : MipsPat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)),
- (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tglobaladdr:$lo)),
+ (ADDiu GPR32:$hi, tglobaladdr:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tblockaddress:$lo)),
+ (ADDiu GPR32:$hi, tblockaddress:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tjumptable:$lo)),
+ (ADDiu GPR32:$hi, tjumptable:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tconstpool:$lo)),
+ (ADDiu GPR32:$hi, tconstpool:$lo)>;
+def : MipsPat<(add GPR32:$hi, (MipsLo tglobaltlsaddr:$lo)),
+ (ADDiu GPR32:$hi, tglobaltlsaddr:$lo)>;
// gp_rel relocs
-def : MipsPat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)),
- (ADDiu CPURegs:$gp, tglobaladdr:$in)>;
-def : MipsPat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)),
- (ADDiu CPURegs:$gp, tconstpool:$in)>;
+def : MipsPat<(add GPR32:$gp, (MipsGPRel tglobaladdr:$in)),
+ (ADDiu GPR32:$gp, tglobaladdr:$in)>;
+def : MipsPat<(add GPR32:$gp, (MipsGPRel tconstpool:$in)),
+ (ADDiu GPR32:$gp, tconstpool:$in)>;
// wrapper_pic
class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>:
MipsPat<(MipsWrapper RC:$gp, node:$in),
(ADDiuOp RC:$gp, node:$in)>;
-def : WrapperPat<tglobaladdr, ADDiu, CPURegs>;
-def : WrapperPat<tconstpool, ADDiu, CPURegs>;
-def : WrapperPat<texternalsym, ADDiu, CPURegs>;
-def : WrapperPat<tblockaddress, ADDiu, CPURegs>;
-def : WrapperPat<tjumptable, ADDiu, CPURegs>;
-def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>;
+def : WrapperPat<tglobaladdr, ADDiu, GPR32>;
+def : WrapperPat<tconstpool, ADDiu, GPR32>;
+def : WrapperPat<texternalsym, ADDiu, GPR32>;
+def : WrapperPat<tblockaddress, ADDiu, GPR32>;
+def : WrapperPat<tjumptable, ADDiu, GPR32>;
+def : WrapperPat<tglobaltlsaddr, ADDiu, GPR32>;
// Mips does not have "not", so we expand our way
-def : MipsPat<(not CPURegs:$in),
- (NOR CPURegsOpnd:$in, ZERO)>;
+def : MipsPat<(not GPR32:$in),
+ (NOR GPR32Opnd:$in, ZERO)>;
// extended loads
-let Predicates = [NotN64, HasStdEnc] in {
+let Predicates = [HasStdEnc] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
- def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu_P8 addr:$src)>;
-}
// peepholes
-let Predicates = [NotN64, HasStdEnc] in {
- def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
-}
-let Predicates = [IsN64, HasStdEnc] in {
- def : MipsPat<(store (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
-}
+let Predicates = [HasStdEnc] in
+def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
// brcond patterns
multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
@@ -1248,6 +1313,10 @@ def : MipsPat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst),
(BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
def : MipsPat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst),
(BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt RC:$lhs, immSExt16Plus1:$rhs)), bb:$dst),
+ (BEQ (SLTiOp RC:$lhs, (Plus1 imm:$rhs)), ZERO, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setugt RC:$lhs, immSExt16Plus1:$rhs)), bb:$dst),
+ (BEQ (SLTiuOp RC:$lhs, (Plus1 imm:$rhs)), ZERO, bb:$dst)>;
def : MipsPat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst),
(BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>;
@@ -1258,11 +1327,20 @@ def : MipsPat<(brcond RC:$cond, bb:$dst),
(BNEOp RC:$cond, ZEROReg, bb:$dst)>;
}
-defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
+defm : BrcondPats<GPR32, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>;
+
+def : MipsPat<(brcond (i32 (setlt i32:$lhs, 1)), bb:$dst),
+ (BLEZ i32:$lhs, bb:$dst)>;
+def : MipsPat<(brcond (i32 (setgt i32:$lhs, -1)), bb:$dst),
+ (BGEZ i32:$lhs, bb:$dst)>;
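The patterns added above fold "greater than" against an immediate into SLTi/SLTiu with the immediate plus one (legal only while the incremented value still fits the signed 16-bit field, which is what immSExt16Plus1 appears to guard), and fold comparisons against 1 and -1 into BLEZ/BGEZ. A small sanity check of the integer identities involved (not from the patch):

    #include <cassert>
    #include <cstdint>

    // Identities behind the folds:
    //   x > c   <=>  !(x < c + 1)    (signed, SLTi with Plus1)
    //   x >u c  <=>  !(x <u c + 1)   (unsigned, SLTiu with Plus1; c + 1 must not wrap)
    //   x < 1   <=>  x <= 0          (lowered to BLEZ)
    //   x > -1  <=>  x >= 0          (lowered to BGEZ)
    int main() {
      for (int64_t x = -3; x <= 3; ++x)
        for (int64_t c = -3; c <= 3; ++c)
          assert((x > c) == !(x < c + 1));
      for (uint64_t x = 0; x <= 5; ++x)
        for (uint64_t c = 0; c <= 5; ++c)
          assert((x > c) == !(x < c + 1));
      for (int32_t x = -2; x <= 2; ++x) {
        assert((x < 1) == (x <= 0));
        assert((x > -1) == (x >= 0));
      }
      return 0;
    }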
// setcc patterns
multiclass SeteqPats<RegisterClass RC, Instruction SLTiuOp, Instruction XOROp,
Instruction SLTuOp, Register ZEROReg> {
+ def : MipsPat<(seteq RC:$lhs, 0),
+ (SLTiuOp RC:$lhs, 1)>;
+ def : MipsPat<(setne RC:$lhs, 0),
+ (SLTuOp ZEROReg, RC:$lhs)>;
def : MipsPat<(seteq RC:$lhs, RC:$rhs),
(SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>;
def : MipsPat<(setne RC:$lhs, RC:$rhs),
@@ -1298,31 +1376,22 @@ multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp,
(XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>;
}
-defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>;
-defm : SetlePats<CPURegs, SLT, SLTu>;
-defm : SetgtPats<CPURegs, SLT, SLTu>;
-defm : SetgePats<CPURegs, SLT, SLTu>;
-defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
+defm : SeteqPats<GPR32, SLTiu, XOR, SLTu, ZERO>;
+defm : SetlePats<GPR32, SLT, SLTu>;
+defm : SetgtPats<GPR32, SLT, SLTu>;
+defm : SetgePats<GPR32, SLT, SLTu>;
+defm : SetgeImmPats<GPR32, SLTi, SLTiu>;
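Among the multiclass changes above, SeteqPats gains two compare-with-zero cases: (seteq x, 0) is selected as SLTiu x, 1 and (setne x, 0) as SLTu $zero, x. A small check of the identities involved (not from the patch):

    #include <cassert>
    #include <cstdint>

    // (seteq x, 0) -> SLTiu x, 1   because  (unsigned)x < 1  <=>  x == 0
    // (setne x, 0) -> SLTu  0, x   because  0 <u x           <=>  x != 0
    int main() {
      const uint32_t samples[] = {0u, 1u, 2u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
      for (uint32_t x : samples) {
        assert((x < 1u) == (x == 0u));
        assert((0u < x) == (x != 0u));
      }
      return 0;
    }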
// bswap pattern
-def : MipsPat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;
-
-// mflo/hi patterns.
-def : MipsPat<(i32 (ExtractLOHI ACRegs:$ac, imm:$lohi_idx)),
- (EXTRACT_SUBREG ACRegs:$ac, imm:$lohi_idx)>;
+def : MipsPat<(bswap GPR32:$rt), (ROTR (WSBH GPR32:$rt), 16)>;
// Load halfword/word patterns.
let AddedComplexity = 40 in {
- let Predicates = [NotN64, HasStdEnc] in {
+ let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LBu, i32, zextloadi8>;
def : LoadRegImmPat<LH, i32, sextloadi16>;
def : LoadRegImmPat<LW, i32, load>;
}
- let Predicates = [IsN64, HasStdEnc] in {
- def : LoadRegImmPat<LBu_P8, i32, zextloadi8>;
- def : LoadRegImmPat<LH_P8, i32, sextloadi16>;
- def : LoadRegImmPat<LW_P8, i32, load>;
- }
}
//===----------------------------------------------------------------------===//
@@ -1343,6 +1412,10 @@ include "Mips16InstrInfo.td"
include "MipsDSPInstrFormats.td"
include "MipsDSPInstrInfo.td"
+// MSA
+include "MipsMSAInstrFormats.td"
+include "MipsMSAInstrInfo.td"
+
// Micromips
include "MicroMipsInstrFormats.td"
include "MicroMipsInstrInfo.td"
diff --git a/lib/Target/Mips/MipsJITInfo.cpp b/lib/Target/Mips/MipsJITInfo.cpp
index 1b2a325d3ce6..d76cb1da2ddc 100644
--- a/lib/Target/Mips/MipsJITInfo.cpp
+++ b/lib/Target/Mips/MipsJITInfo.cpp
@@ -218,9 +218,9 @@ void *MipsJITInfo::emitFunctionStub(const Function *F, void *Fn,
Hi++;
int Lo = (int)(EmittedAddr & 0xffff);
- // lui t9, %hi(EmittedAddr)
- // addiu t9, t9, %lo(EmittedAddr)
- // jalr t8, t9
+ // lui $t9, %hi(EmittedAddr)
+ // addiu $t9, $t9, %lo(EmittedAddr)
+ // jalr $t8, $t9
// nop
if (IsLittleEndian) {
JCE.emitWordLE(0xf << 26 | 25 << 16 | Hi);
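The stub splits EmittedAddr into a %hi/%lo pair for the lui/addiu sequence. Because addiu sign-extends its 16-bit immediate, the high half has to be incremented whenever bit 15 of the low half is set, which is presumably what the Hi++ above compensates for. A standalone sketch of that split (the function name is hypothetical):

    #include <cassert>
    #include <cstdint>

    // Rebuild an address from the %hi/%lo pair the stub materializes with
    // lui + addiu; the carry adjustment mirrors the Hi++ seen in the hunk.
    static uint32_t rebuildFromHiLo(uint32_t addr) {
      uint32_t hi = addr >> 16;
      if (addr & 0x8000)                 // low half will sign-extend negatively
        ++hi;
      int32_t lo = static_cast<int16_t>(addr & 0xffff);
      return (hi << 16) + static_cast<uint32_t>(lo); // lui $t9, hi ; addiu $t9, $t9, lo
    }

    int main() {
      const uint32_t addrs[] = {0x00000000u, 0x00007fffu, 0x00008000u,
                                0x1234ffffu, 0xdeadbeefu};
      for (uint32_t a : addrs)
        assert(rebuildFromHiLo(a) == a);
      return 0;
    }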
diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp
index bf5ad3703119..2efe57847add 100644
--- a/lib/Target/Mips/MipsLongBranch.cpp
+++ b/lib/Target/Mips/MipsLongBranch.cpp
@@ -65,7 +65,6 @@ namespace {
static char ID;
MipsLongBranch(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
- TII(static_cast<const MipsInstrInfo*>(tm.getInstrInfo())),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 13 : 9)) {}
@@ -85,7 +84,6 @@ namespace {
void expandToLongBranch(MBBInfo &Info);
const TargetMachine &TM;
- const MipsInstrInfo *TII;
MachineFunction *MF;
SmallVector<MBBInfo, 16> MBBInfos;
bool IsPIC;
@@ -172,6 +170,8 @@ void MipsLongBranch::initMBBInfo() {
MBBInfos.clear();
MBBInfos.resize(MF->size());
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
for (unsigned I = 0, E = MBBInfos.size(); I < E; ++I) {
MachineBasicBlock *MBB = MF->getBlockNumbered(I);
@@ -217,7 +217,9 @@ int64_t MipsLongBranch::computeOffset(const MachineInstr *Br) {
// MachineBasicBlock operand MBBOpnd.
void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
DebugLoc DL, MachineBasicBlock *MBBOpnd) {
- unsigned NewOpc = TII->GetOppositeBranchOpc(Br->getOpcode());
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+ unsigned NewOpc = TII->getOppositeBranchOpc(Br->getOpcode());
const MCInstrDesc &NewDesc = TII->get(NewOpc);
MachineInstrBuilder MIB = BuildMI(MBB, Br, DL, NewDesc);
@@ -235,6 +237,11 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br,
MIB.addMBB(MBBOpnd);
+ // Bundle the instruction in the delay slot to the newly created branch
+ // and erase the original branch.
+ assert(Br->isBundledWithSucc());
+ MachineBasicBlock::instr_iterator II(Br);
+ MIBundleBuilder(&*MIB).append((++II)->removeFromBundle());
Br->eraseFromParent();
}
@@ -247,6 +254,9 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
MachineFunction::iterator FallThroughMBB = ++MachineFunction::iterator(MBB);
MachineBasicBlock *LongBrMBB = MF->CreateMachineBasicBlock(BB);
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+
MF->insert(FallThroughMBB, LongBrMBB);
MBB->removeSuccessor(TgtMBB);
MBB->addSuccessor(LongBrMBB);
@@ -399,6 +409,9 @@ static void emitGPDisp(MachineFunction &F, const MipsInstrInfo *TII) {
}
bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
+ const MipsInstrInfo *TII =
+ static_cast<const MipsInstrInfo*>(TM.getInstrInfo());
+
if (TM.getSubtarget<MipsSubtarget>().inMips16Mode())
return false;
if ((TM.getRelocationModel() == Reloc::PIC_) &&
@@ -412,7 +425,7 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
MF = &F;
initMBBInfo();
- SmallVector<MBBInfo, 16>::iterator I, E = MBBInfos.end();
+ SmallVectorImpl<MBBInfo>::iterator I, E = MBBInfos.end();
bool EverMadeChange = false, MadeChange = true;
while (MadeChange) {
@@ -424,8 +437,10 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) {
if (!I->Br || I->HasLongBranch)
continue;
+ int ShVal = TM.getSubtarget<MipsSubtarget>().inMicroMipsMode() ? 2 : 4;
+
// Check if offset fits into 16-bit immediate field of branches.
- if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / 4))
+ if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / ShVal))
continue;
I->HasLongBranch = true;
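The new ShVal reflects the branch-offset granularity: standard MIPS branches count the 16-bit offset in 4-byte instruction words, while microMIPS instructions can be 2 bytes long so the offset is counted in halfwords, hence the division by 2 instead of 4 before the isInt<16> range check. A minimal sketch of that check (the helper name is hypothetical):

    #include <cassert>
    #include <cstdint>

    // A branch may stay short when its byte offset, scaled by the encoding
    // granularity, still fits the 16-bit immediate field.
    static bool fitsShortBranch(int64_t byteOffset, bool microMips) {
      const int shVal = microMips ? 2 : 4;
      const int64_t scaled = byteOffset / shVal;
      return scaled >= -32768 && scaled <= 32767; // isInt<16>(scaled)
    }

    int main() {
      assert(fitsShortBranch(32767 * 4, /*microMips=*/false));
      assert(!fitsShortBranch(32768 * 4, /*microMips=*/false));
      assert(!fitsShortBranch(32767 * 4, /*microMips=*/true)); // shorter reach in bytes
      return 0;
    }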
diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp
index d836975eb7d2..b6dfadce14e9 100644
--- a/lib/Target/Mips/MipsMCInstLower.cpp
+++ b/lib/Target/Mips/MipsMCInstLower.cpp
@@ -28,8 +28,7 @@ using namespace llvm;
MipsMCInstLower::MipsMCInstLower(MipsAsmPrinter &asmprinter)
: AsmPrinter(asmprinter) {}
-void MipsMCInstLower::Initialize(Mangler *M, MCContext *C) {
- Mang = M;
+void MipsMCInstLower::Initialize(MCContext *C) {
Ctx = C;
}
@@ -74,7 +73,7 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
break;
case MachineOperand::MO_GlobalAddress:
- Symbol = Mang->getSymbol(MO.getGlobal());
+ Symbol = AsmPrinter.getSymbol(MO.getGlobal());
Offset += MO.getOffset();
break;
diff --git a/lib/Target/Mips/MipsMCInstLower.h b/lib/Target/Mips/MipsMCInstLower.h
index c4a6016105b2..4570bd90997f 100644
--- a/lib/Target/Mips/MipsMCInstLower.h
+++ b/lib/Target/Mips/MipsMCInstLower.h
@@ -19,7 +19,6 @@ namespace llvm {
class MCOperand;
class MachineInstr;
class MachineFunction;
- class Mangler;
class MipsAsmPrinter;
/// MipsMCInstLower - This class is used to lower an MachineInstr into an
@@ -27,11 +26,10 @@ namespace llvm {
class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {
typedef MachineOperand::MachineOperandType MachineOperandType;
MCContext *Ctx;
- Mangler *Mang;
MipsAsmPrinter &AsmPrinter;
public:
MipsMCInstLower(MipsAsmPrinter &asmprinter);
- void Initialize(Mangler *mang, MCContext *C);
+ void Initialize(MCContext *C);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const;
diff --git a/lib/Target/Mips/MipsMSAInstrFormats.td b/lib/Target/Mips/MipsMSAInstrFormats.td
new file mode 100644
index 000000000000..875dc0b4034d
--- /dev/null
+++ b/lib/Target/Mips/MipsMSAInstrFormats.td
@@ -0,0 +1,406 @@
+//===- MipsMSAInstrFormats.td - Mips Instruction Formats ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def HasMSA : Predicate<"Subtarget.hasMSA()">,
+ AssemblerPredicate<"FeatureMSA">;
+
+class MSAInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> {
+ let Predicates = [HasMSA];
+ let Inst{31-26} = 0b011110;
+}
+
+class MSACBranch : MSAInst {
+ let Inst{31-26} = 0b010001;
+}
+
+class MSASpecial : MSAInst {
+ let Inst{31-26} = 0b000000;
+}
+
+class PseudoMSA<dag outs, dag ins, list<dag> pattern,
+ InstrItinClass itin = IIPseudo>:
+ MipsPseudo<outs, ins, pattern, itin> {
+ let Predicates = [HasMSA];
+}
+
+class MSA_BIT_B_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<3> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-19} = 0b1110;
+ let Inst{18-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_H_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<4> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-20} = 0b110;
+ let Inst{19-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_W_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<5> m;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = 0b10;
+ let Inst{20-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_BIT_D_FMT<bits<3> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+ bits<6> m;
+
+ let Inst{25-23} = major;
+ let Inst{22} = 0b0;
+ let Inst{21-16} = m;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2R_FILL_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-18} = major;
+ let Inst{17-16} = df;
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2R_FMT<bits<8> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-18} = major;
+ let Inst{17-16} = df;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_2RF_FMT<bits<9> major, bits<1> df, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-17} = major;
+ let Inst{16} = df;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3R_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3RF_FMT<bits<4> major, bits<1> df, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_3R_INDEX_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> rt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_CFCMSA_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> rd;
+ bits<5> cs;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = cs;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_CTCMSA_FMT<bits<10> major, bits<6> minor>: MSAInst {
+ bits<5> rs;
+ bits<5> cd;
+
+ let Inst{25-16} = major;
+ let Inst{15-11} = rs;
+ let Inst{10-6} = cd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_D_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-17} = 0b11100;
+ let Inst{16} = n{0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_COPY_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<4> n;
+ bits<5> ws;
+ bits<5> rd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = ws;
+ let Inst{10-6} = rd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_B_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-20} = 0b00;
+ let Inst{19-16} = n{3-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_H_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-19} = 0b100;
+ let Inst{18-16} = n{2-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_ELM_INSERT_W_FMT<bits<4> major, bits<6> minor>: MSAInst {
+ bits<6> n;
+ bits<5> rs;
+ bits<5> wd;
+
+ let Inst{25-22} = major;
+ let Inst{21-18} = 0b1100;
+ let Inst{17-16} = n{1-0};
+ let Inst{15-11} = rs;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I5_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<5> imm;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = imm;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I8_FMT<bits<2> major, bits<6> minor>: MSAInst {
+ bits<8> u8;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-24} = major;
+ let Inst{23-16} = u8;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_I10_FMT<bits<3> major, bits<2> df, bits<6> minor>: MSAInst {
+ bits<10> s10;
+ bits<5> wd;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-11} = s10;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_MI10_FMT<bits<2> df, bits<4> minor>: MSAInst {
+ bits<21> addr;
+ bits<5> wd;
+
+ let Inst{25-16} = addr{9-0};
+ let Inst{15-11} = addr{20-16};
+ let Inst{10-6} = wd;
+ let Inst{5-2} = minor;
+ let Inst{1-0} = df;
+}
+
+class MSA_VEC_FMT<bits<5> major, bits<6> minor>: MSAInst {
+ bits<5> wt;
+ bits<5> ws;
+ bits<5> wd;
+
+ let Inst{25-21} = major;
+ let Inst{20-16} = wt;
+ let Inst{15-11} = ws;
+ let Inst{10-6} = wd;
+ let Inst{5-0} = minor;
+}
+
+class MSA_CBRANCH_FMT<bits<3> major, bits<2> df>: MSACBranch {
+ bits<16> offset;
+ bits<5> wt;
+
+ let Inst{25-23} = major;
+ let Inst{22-21} = df;
+ let Inst{20-16} = wt;
+ let Inst{15-0} = offset;
+}
+
+class MSA_CBRANCH_V_FMT<bits<5> major>: MSACBranch {
+ bits<16> offset;
+ bits<5> wt;
+
+ let Inst{25-21} = major;
+ let Inst{20-16} = wt;
+ let Inst{15-0} = offset;
+}
+
+class SPECIAL_LSA_FMT<bits<6> minor>: MSASpecial {
+ bits<5> rs;
+ bits<5> rt;
+ bits<5> rd;
+ bits<2> sa;
+
+ let Inst{25-21} = rs;
+ let Inst{20-16} = rt;
+ let Inst{15-11} = rd;
+ let Inst{10-8} = 0b000;
+ let Inst{7-6} = sa;
+ let Inst{5-0} = minor;
+}
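These format classes only describe how operand fields are packed into the 32-bit Inst value: the fixed MSA major opcode 0b011110 from MSAInst occupies bits 31-26 and each "let Inst{...}" assignment slots a field below it. A rough standalone illustration of the MSA_3R_FMT layout (not part of the patch; the real encoder is generated by TableGen):

    #include <cassert>
    #include <cstdint>

    // Pack the MSA_3R_FMT fields the same way the "let Inst{...}" assignments
    // above do (opcode 31-26, major 25-23, df 22-21, wt 20-16, ws 15-11,
    // wd 10-6, minor 5-0). 0x1E is the 0b011110 MSA opcode.
    static uint32_t encodeMSA3R(uint32_t major, uint32_t df, uint32_t wt,
                                uint32_t ws, uint32_t wd, uint32_t minor) {
      return (0x1Eu << 26) | (major << 23) | (df << 21) |
             (wt << 16) | (ws << 11) | (wd << 6) | minor;
    }

    int main() {
      // ADDV.W uses MSA_3R_FMT<0b000, 0b10, 0b001110>; with wt=ws=wd=0 only
      // the opcode, data-format and minor fields should be set.
      uint32_t word = encodeMSA3R(0x0, 0x2, 0, 0, 0, 0x0E);
      assert(word == ((0x1Eu << 26) | (0x2u << 21) | 0x0Eu));
      return 0;
    }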
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td
new file mode 100644
index 000000000000..82c51a6473da
--- /dev/null
+++ b/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -0,0 +1,3694 @@
+//===- MipsMSAInstrInfo.td - MSA ASE instructions ----------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Mips MSA ASE instructions.
+//
+//===----------------------------------------------------------------------===//
+
+def SDT_MipsVecCond : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>;
+def SDT_VSetCC : SDTypeProfile<1, 3, [SDTCisInt<0>,
+ SDTCisInt<1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisVT<3, OtherVT>]>;
+def SDT_VFSetCC : SDTypeProfile<1, 3, [SDTCisInt<0>,
+ SDTCisFP<1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisVT<3, OtherVT>]>;
+def SDT_VSHF : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisInt<1>, SDTCisVec<1>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>]>;
+def SDT_SHF : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisVT<1, i32>, SDTCisSameAs<0, 2>]>;
+def SDT_ILV : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisVec<0>,
+ SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
+
+def MipsVAllNonZero : SDNode<"MipsISD::VALL_NONZERO", SDT_MipsVecCond>;
+def MipsVAnyNonZero : SDNode<"MipsISD::VANY_NONZERO", SDT_MipsVecCond>;
+def MipsVAllZero : SDNode<"MipsISD::VALL_ZERO", SDT_MipsVecCond>;
+def MipsVAnyZero : SDNode<"MipsISD::VANY_ZERO", SDT_MipsVecCond>;
+def MipsVSMax : SDNode<"MipsISD::VSMAX", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVSMin : SDNode<"MipsISD::VSMIN", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVUMax : SDNode<"MipsISD::VUMAX", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVUMin : SDNode<"MipsISD::VUMIN", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVNOR : SDNode<"MipsISD::VNOR", SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def MipsVSHF : SDNode<"MipsISD::VSHF", SDT_VSHF>;
+def MipsSHF : SDNode<"MipsISD::SHF", SDT_SHF>;
+def MipsILVEV : SDNode<"MipsISD::ILVEV", SDT_ILV>;
+def MipsILVOD : SDNode<"MipsISD::ILVOD", SDT_ILV>;
+def MipsILVL : SDNode<"MipsISD::ILVL", SDT_ILV>;
+def MipsILVR : SDNode<"MipsISD::ILVR", SDT_ILV>;
+def MipsPCKEV : SDNode<"MipsISD::PCKEV", SDT_ILV>;
+def MipsPCKOD : SDNode<"MipsISD::PCKOD", SDT_ILV>;
+
+def vsetcc : SDNode<"ISD::SETCC", SDT_VSetCC>;
+def vfsetcc : SDNode<"ISD::SETCC", SDT_VFSetCC>;
+
+def MipsVExtractSExt : SDNode<"MipsISD::VEXTRACT_SEXT_ELT",
+ SDTypeProfile<1, 3, [SDTCisPtrTy<2>]>, []>;
+def MipsVExtractZExt : SDNode<"MipsISD::VEXTRACT_ZEXT_ELT",
+ SDTypeProfile<1, 3, [SDTCisPtrTy<2>]>, []>;
+
+// Operands
+
+def uimm2 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+}
+
+// The immediate of an LSA instruction needs special handling:
+// the value stored in the encoding is the immediate minus one.
+def uimm2LSAAsmOperand : AsmOperandClass {
+ let Name = "LSAImm";
+ let ParserMethod = "parseLSAImm";
+ let RenderMethod = "addImmOperands";
+}
+
+def LSAImm : Operand<i32> {
+ let PrintMethod = "printUnsignedImm";
+ let EncoderMethod = "getLSAImmEncoding";
+ let DecoderMethod = "DecodeLSAImm";
+ let ParserMatchClass = uimm2LSAAsmOperand;
+}
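Per the comment above and the immZExt2Lsa leaf further down, the LSA shift amount is written as 1..4 in assembly but stored in the 2-bit field as that value minus one, which is what the getLSAImmEncoding/DecodeLSAImm hooks are named for. A tiny round-trip sketch of that bias (standalone, not the real hooks):

    #include <cassert>
    #include <cstdint>

    // Bias between the assembly-level LSA shift amount (1..4) and the
    // 2-bit encoded field (0..3).
    static uint32_t encodeLSAImm(uint32_t imm)   { return imm - 1; }
    static uint32_t decodeLSAImm(uint32_t field) { return field + 1; }

    int main() {
      for (uint32_t imm = 1; imm <= 4; ++imm) {
        uint32_t field = encodeLSAImm(imm);
        assert(field <= 3);                 // fits the field: isUInt<2>(Imm - 1)
        assert(decodeLSAImm(field) == imm); // round-trips
      }
      return 0;
    }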
+
+def uimm3 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def uimm4 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def uimm8 : Operand<i32> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def simm5 : Operand<i32>;
+
+def simm10 : Operand<i32>;
+
+def vsplat_uimm1 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm2 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm3 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm4 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm5 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm6 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_uimm8 : Operand<vAny> {
+ let PrintMethod = "printUnsignedImm8";
+}
+
+def vsplat_simm5 : Operand<vAny>;
+
+def vsplat_simm10 : Operand<vAny>;
+
+def immZExt2Lsa : ImmLeaf<i32, [{return isUInt<2>(Imm - 1);}]>;
+
+// Pattern fragments
+def vextract_sext_i8 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i8)>;
+def vextract_sext_i16 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i16)>;
+def vextract_sext_i32 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractSExt node:$vec, node:$idx, i32)>;
+
+def vextract_zext_i8 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i8)>;
+def vextract_zext_i16 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i16)>;
+def vextract_zext_i32 : PatFrag<(ops node:$vec, node:$idx),
+ (MipsVExtractZExt node:$vec, node:$idx, i32)>;
+
+def vinsert_v16i8 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v16i8 (vector_insert node:$vec, node:$val, node:$idx))>;
+def vinsert_v8i16 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v8i16 (vector_insert node:$vec, node:$val, node:$idx))>;
+def vinsert_v4i32 : PatFrag<(ops node:$vec, node:$val, node:$idx),
+ (v4i32 (vector_insert node:$vec, node:$val, node:$idx))>;
+
+class vfsetcc_type<ValueType ResTy, ValueType OpTy, CondCode CC> :
+ PatFrag<(ops node:$lhs, node:$rhs),
+ (ResTy (vfsetcc (OpTy node:$lhs), (OpTy node:$rhs), CC))>;
+
+// ISD::SETFALSE cannot occur
+def vfsetoeq_v4f32 : vfsetcc_type<v4i32, v4f32, SETOEQ>;
+def vfsetoeq_v2f64 : vfsetcc_type<v2i64, v2f64, SETOEQ>;
+def vfsetoge_v4f32 : vfsetcc_type<v4i32, v4f32, SETOGE>;
+def vfsetoge_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGE>;
+def vfsetogt_v4f32 : vfsetcc_type<v4i32, v4f32, SETOGT>;
+def vfsetogt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOGT>;
+def vfsetole_v4f32 : vfsetcc_type<v4i32, v4f32, SETOLE>;
+def vfsetole_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLE>;
+def vfsetolt_v4f32 : vfsetcc_type<v4i32, v4f32, SETOLT>;
+def vfsetolt_v2f64 : vfsetcc_type<v2i64, v2f64, SETOLT>;
+def vfsetone_v4f32 : vfsetcc_type<v4i32, v4f32, SETONE>;
+def vfsetone_v2f64 : vfsetcc_type<v2i64, v2f64, SETONE>;
+def vfsetord_v4f32 : vfsetcc_type<v4i32, v4f32, SETO>;
+def vfsetord_v2f64 : vfsetcc_type<v2i64, v2f64, SETO>;
+def vfsetun_v4f32 : vfsetcc_type<v4i32, v4f32, SETUO>;
+def vfsetun_v2f64 : vfsetcc_type<v2i64, v2f64, SETUO>;
+def vfsetueq_v4f32 : vfsetcc_type<v4i32, v4f32, SETUEQ>;
+def vfsetueq_v2f64 : vfsetcc_type<v2i64, v2f64, SETUEQ>;
+def vfsetuge_v4f32 : vfsetcc_type<v4i32, v4f32, SETUGE>;
+def vfsetuge_v2f64 : vfsetcc_type<v2i64, v2f64, SETUGE>;
+def vfsetugt_v4f32 : vfsetcc_type<v4i32, v4f32, SETUGT>;
+def vfsetugt_v2f64 : vfsetcc_type<v2i64, v2f64, SETUGT>;
+def vfsetule_v4f32 : vfsetcc_type<v4i32, v4f32, SETULE>;
+def vfsetule_v2f64 : vfsetcc_type<v2i64, v2f64, SETULE>;
+def vfsetult_v4f32 : vfsetcc_type<v4i32, v4f32, SETULT>;
+def vfsetult_v2f64 : vfsetcc_type<v2i64, v2f64, SETULT>;
+def vfsetune_v4f32 : vfsetcc_type<v4i32, v4f32, SETUNE>;
+def vfsetune_v2f64 : vfsetcc_type<v2i64, v2f64, SETUNE>;
+// ISD::SETTRUE cannot occur
+// ISD::SETFALSE2 cannot occur
+// ISD::SETTRUE2 cannot occur
+
+class vsetcc_type<ValueType ResTy, CondCode CC> :
+ PatFrag<(ops node:$lhs, node:$rhs),
+ (ResTy (vsetcc node:$lhs, node:$rhs, CC))>;
+
+def vseteq_v16i8 : vsetcc_type<v16i8, SETEQ>;
+def vseteq_v8i16 : vsetcc_type<v8i16, SETEQ>;
+def vseteq_v4i32 : vsetcc_type<v4i32, SETEQ>;
+def vseteq_v2i64 : vsetcc_type<v2i64, SETEQ>;
+def vsetle_v16i8 : vsetcc_type<v16i8, SETLE>;
+def vsetle_v8i16 : vsetcc_type<v8i16, SETLE>;
+def vsetle_v4i32 : vsetcc_type<v4i32, SETLE>;
+def vsetle_v2i64 : vsetcc_type<v2i64, SETLE>;
+def vsetlt_v16i8 : vsetcc_type<v16i8, SETLT>;
+def vsetlt_v8i16 : vsetcc_type<v8i16, SETLT>;
+def vsetlt_v4i32 : vsetcc_type<v4i32, SETLT>;
+def vsetlt_v2i64 : vsetcc_type<v2i64, SETLT>;
+def vsetule_v16i8 : vsetcc_type<v16i8, SETULE>;
+def vsetule_v8i16 : vsetcc_type<v8i16, SETULE>;
+def vsetule_v4i32 : vsetcc_type<v4i32, SETULE>;
+def vsetule_v2i64 : vsetcc_type<v2i64, SETULE>;
+def vsetult_v16i8 : vsetcc_type<v16i8, SETULT>;
+def vsetult_v8i16 : vsetcc_type<v8i16, SETULT>;
+def vsetult_v4i32 : vsetcc_type<v4i32, SETULT>;
+def vsetult_v2i64 : vsetcc_type<v2i64, SETULT>;
+
+def vsplati8 : PatFrag<(ops node:$e0),
+ (v16i8 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati16 : PatFrag<(ops node:$e0),
+ (v8i16 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati32 : PatFrag<(ops node:$e0),
+ (v4i32 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplati64 : PatFrag<(ops node:$e0),
+ (v2i64 (build_vector:$v0 node:$e0, node:$e0))>;
+def vsplatf32 : PatFrag<(ops node:$e0),
+ (v4f32 (build_vector node:$e0, node:$e0,
+ node:$e0, node:$e0))>;
+def vsplatf64 : PatFrag<(ops node:$e0),
+ (v2f64 (build_vector node:$e0, node:$e0))>;
+
+def vsplati8_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati8 node:$i), node:$v, node:$v)>;
+def vsplati16_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati16 node:$i), node:$v, node:$v)>;
+def vsplati32_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati32 node:$i), node:$v, node:$v)>;
+def vsplati64_elt : PatFrag<(ops node:$v, node:$i),
+ (MipsVSHF (vsplati64 node:$i), node:$v, node:$v)>;
+
+class SplatPatLeaf<Operand opclass, dag frag, code pred = [{}],
+ SDNodeXForm xform = NOOP_SDNodeXForm>
+ : PatLeaf<frag, pred, xform> {
+ Operand OpClass = opclass;
+}
+
+class SplatComplexPattern<Operand opclass, ValueType ty, int numops, string fn,
+ list<SDNode> roots = [],
+ list<SDNodeProperty> props = []> :
+ ComplexPattern<ty, numops, fn, roots, props> {
+ Operand OpClass = opclass;
+}
+
+def vsplati8_uimm3 : SplatComplexPattern<vsplat_uimm3, v16i8, 1,
+ "selectVSplatUimm3",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm4 : SplatComplexPattern<vsplat_uimm4, v16i8, 1,
+ "selectVSplatUimm4",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm5 : SplatComplexPattern<vsplat_uimm5, v16i8, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati8_uimm8 : SplatComplexPattern<vsplat_uimm8, v16i8, 1,
+ "selectVSplatUimm8",
+ [build_vector, bitconvert]>;
+
+def vsplati8_simm5 : SplatComplexPattern<vsplat_simm5, v16i8, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm3 : SplatComplexPattern<vsplat_uimm3, v8i16, 1,
+ "selectVSplatUimm3",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm4 : SplatComplexPattern<vsplat_uimm4, v8i16, 1,
+ "selectVSplatUimm4",
+ [build_vector, bitconvert]>;
+
+def vsplati16_uimm5 : SplatComplexPattern<vsplat_uimm5, v8i16, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati16_simm5 : SplatComplexPattern<vsplat_simm5, v8i16, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati32_uimm2 : SplatComplexPattern<vsplat_uimm2, v4i32, 1,
+ "selectVSplatUimm2",
+ [build_vector, bitconvert]>;
+
+def vsplati32_uimm5 : SplatComplexPattern<vsplat_uimm5, v4i32, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati32_simm5 : SplatComplexPattern<vsplat_simm5, v4i32, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm1 : SplatComplexPattern<vsplat_uimm1, v2i64, 1,
+ "selectVSplatUimm1",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm5 : SplatComplexPattern<vsplat_uimm5, v2i64, 1,
+ "selectVSplatUimm5",
+ [build_vector, bitconvert]>;
+
+def vsplati64_uimm6 : SplatComplexPattern<vsplat_uimm6, v2i64, 1,
+ "selectVSplatUimm6",
+ [build_vector, bitconvert]>;
+
+def vsplati64_simm5 : SplatComplexPattern<vsplat_simm5, v2i64, 1,
+ "selectVSplatSimm5",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with a value that is an exact
+// power of 2
+def vsplat_uimm_pow2 : ComplexPattern<vAny, 1, "selectVSplatUimmPow2",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with a value that is the bitwise
+// inverse of an exact power of 2
+def vsplat_uimm_inv_pow2 : ComplexPattern<vAny, 1, "selectVSplatUimmInvPow2",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with only a consecutive sequence
+// of left-most bits set.
+def vsplat_maskl_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
+ "selectVSplatMaskL",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with only a consecutive sequence
+// of right-most bits set.
+def vsplat_maskr_bits : SplatComplexPattern<vsplat_uimm8, vAny, 1,
+ "selectVSplatMaskR",
+ [build_vector, bitconvert]>;
+
+// Any build_vector that is a constant splat with a value that equals 1
+// FIXME: These should be a ComplexPattern but we can't use them because the
+// ISel generator requires the uses to have a name, but providing a name
+// causes other errors ("used in pattern but not operand list")
+def vsplat_imm_eq_1 : PatLeaf<(build_vector), [{
+ APInt Imm;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ return selectVSplat (N, Imm) &&
+ Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 1;
+}]>;
+
+def vsplati64_imm_eq_1 : PatLeaf<(bitconvert (v4i32 (build_vector))), [{
+ APInt Imm;
+ SDNode *BV = N->getOperand(0).getNode();
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ return selectVSplat (BV, Imm) &&
+ Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == 1;
+}]>;
+
+def vbclr_b : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_h : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_w : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl vsplat_imm_eq_1, node:$wt),
+ immAllOnesV))>;
+def vbclr_d : PatFrag<(ops node:$ws, node:$wt),
+ (and node:$ws, (xor (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt),
+ (bitconvert (v4i32 immAllOnesV))))>;
+
+def vbneg_b : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_h : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_w : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbneg_d : PatFrag<(ops node:$ws, node:$wt),
+ (xor node:$ws, (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt))>;
+
+def vbset_b : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_h : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_w : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl vsplat_imm_eq_1, node:$wt))>;
+def vbset_d : PatFrag<(ops node:$ws, node:$wt),
+ (or node:$ws, (shl (v2i64 vsplati64_imm_eq_1),
+ node:$wt))>;
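The vbclr/vbneg/vbset fragments above express, per vector element, the usual clear/flip/set-bit idioms that the BCLR/BNEG/BSET encodings defined later in this file are intended to match. A scalar sanity check of those identities (not from the patch):

    #include <cassert>
    #include <cstdint>

    // bclr: x & ~(1 << n) clears bit n
    // bneg: x ^  (1 << n) flips bit n
    // bset: x |  (1 << n) sets bit n
    int main() {
      const uint32_t x = 0xA5A5A5A5u;
      for (uint32_t n = 0; n < 32; ++n) {
        const uint32_t bit = 1u << n;
        assert((((x & ~bit) >> n) & 1u) == 0u);                 // cleared
        assert((((x ^ bit) >> n) & 1u) == (((~x) >> n) & 1u));  // flipped
        assert((((x | bit) >> n) & 1u) == 1u);                  // set
      }
      return 0;
    }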
+
+def fms : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (fsub node:$wd, (fmul node:$ws, node:$wt))>;
+
+def muladd : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (add node:$wd, (mul node:$ws, node:$wt))>;
+
+def mulsub : PatFrag<(ops node:$wd, node:$ws, node:$wt),
+ (sub node:$wd, (mul node:$ws, node:$wt))>;
+
+def mul_fexp2 : PatFrag<(ops node:$ws, node:$wt),
+ (fmul node:$ws, (fexp2 node:$wt))>;
+
+// Immediates
+def immSExt5 : ImmLeaf<i32, [{return isInt<5>(Imm);}]>;
+def immSExt10: ImmLeaf<i32, [{return isInt<10>(Imm);}]>;
+
+// Instruction encoding.
+class ADD_A_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010000>;
+class ADD_A_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010000>;
+class ADD_A_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010000>;
+class ADD_A_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010000>;
+
+class ADDS_A_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010000>;
+class ADDS_A_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010000>;
+class ADDS_A_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010000>;
+class ADDS_A_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010000>;
+
+class ADDS_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010000>;
+class ADDS_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010000>;
+class ADDS_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010000>;
+class ADDS_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010000>;
+
+class ADDS_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010000>;
+class ADDS_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010000>;
+class ADDS_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010000>;
+class ADDS_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010000>;
+
+class ADDV_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001110>;
+class ADDV_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001110>;
+class ADDV_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001110>;
+class ADDV_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001110>;
+
+class ADDVI_B_ENC : MSA_I5_FMT<0b000, 0b00, 0b000110>;
+class ADDVI_H_ENC : MSA_I5_FMT<0b000, 0b01, 0b000110>;
+class ADDVI_W_ENC : MSA_I5_FMT<0b000, 0b10, 0b000110>;
+class ADDVI_D_ENC : MSA_I5_FMT<0b000, 0b11, 0b000110>;
+
+class AND_V_ENC : MSA_VEC_FMT<0b00000, 0b011110>;
+
+class ANDI_B_ENC : MSA_I8_FMT<0b00, 0b000000>;
+
+class ASUB_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010001>;
+class ASUB_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010001>;
+class ASUB_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010001>;
+class ASUB_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010001>;
+
+class ASUB_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010001>;
+class ASUB_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010001>;
+class ASUB_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010001>;
+class ASUB_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010001>;
+
+class AVE_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010000>;
+class AVE_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010000>;
+class AVE_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010000>;
+class AVE_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010000>;
+
+class AVE_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010000>;
+class AVE_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010000>;
+class AVE_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010000>;
+class AVE_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010000>;
+
+class AVER_S_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010000>;
+class AVER_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010000>;
+class AVER_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010000>;
+class AVER_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010000>;
+
+class AVER_U_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010000>;
+class AVER_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010000>;
+class AVER_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010000>;
+class AVER_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010000>;
+
+class BCLR_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001101>;
+class BCLR_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001101>;
+class BCLR_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001101>;
+class BCLR_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001101>;
+
+class BCLRI_B_ENC : MSA_BIT_B_FMT<0b011, 0b001001>;
+class BCLRI_H_ENC : MSA_BIT_H_FMT<0b011, 0b001001>;
+class BCLRI_W_ENC : MSA_BIT_W_FMT<0b011, 0b001001>;
+class BCLRI_D_ENC : MSA_BIT_D_FMT<0b011, 0b001001>;
+
+class BINSL_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b001101>;
+class BINSL_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b001101>;
+class BINSL_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b001101>;
+class BINSL_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b001101>;
+
+class BINSLI_B_ENC : MSA_BIT_B_FMT<0b110, 0b001001>;
+class BINSLI_H_ENC : MSA_BIT_H_FMT<0b110, 0b001001>;
+class BINSLI_W_ENC : MSA_BIT_W_FMT<0b110, 0b001001>;
+class BINSLI_D_ENC : MSA_BIT_D_FMT<0b110, 0b001001>;
+
+class BINSR_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b001101>;
+class BINSR_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b001101>;
+class BINSR_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b001101>;
+class BINSR_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b001101>;
+
+class BINSRI_B_ENC : MSA_BIT_B_FMT<0b111, 0b001001>;
+class BINSRI_H_ENC : MSA_BIT_H_FMT<0b111, 0b001001>;
+class BINSRI_W_ENC : MSA_BIT_W_FMT<0b111, 0b001001>;
+class BINSRI_D_ENC : MSA_BIT_D_FMT<0b111, 0b001001>;
+
+class BMNZ_V_ENC : MSA_VEC_FMT<0b00100, 0b011110>;
+
+class BMNZI_B_ENC : MSA_I8_FMT<0b00, 0b000001>;
+
+class BMZ_V_ENC : MSA_VEC_FMT<0b00101, 0b011110>;
+
+class BMZI_B_ENC : MSA_I8_FMT<0b01, 0b000001>;
+
+class BNEG_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001101>;
+class BNEG_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001101>;
+class BNEG_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001101>;
+class BNEG_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001101>;
+
+class BNEGI_B_ENC : MSA_BIT_B_FMT<0b101, 0b001001>;
+class BNEGI_H_ENC : MSA_BIT_H_FMT<0b101, 0b001001>;
+class BNEGI_W_ENC : MSA_BIT_W_FMT<0b101, 0b001001>;
+class BNEGI_D_ENC : MSA_BIT_D_FMT<0b101, 0b001001>;
+
+class BNZ_B_ENC : MSA_CBRANCH_FMT<0b111, 0b00>;
+class BNZ_H_ENC : MSA_CBRANCH_FMT<0b111, 0b01>;
+class BNZ_W_ENC : MSA_CBRANCH_FMT<0b111, 0b10>;
+class BNZ_D_ENC : MSA_CBRANCH_FMT<0b111, 0b11>;
+
+class BNZ_V_ENC : MSA_CBRANCH_V_FMT<0b01111>;
+
+class BSEL_V_ENC : MSA_VEC_FMT<0b00110, 0b011110>;
+
+class BSELI_B_ENC : MSA_I8_FMT<0b10, 0b000001>;
+
+class BSET_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001101>;
+class BSET_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001101>;
+class BSET_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001101>;
+class BSET_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001101>;
+
+class BSETI_B_ENC : MSA_BIT_B_FMT<0b100, 0b001001>;
+class BSETI_H_ENC : MSA_BIT_H_FMT<0b100, 0b001001>;
+class BSETI_W_ENC : MSA_BIT_W_FMT<0b100, 0b001001>;
+class BSETI_D_ENC : MSA_BIT_D_FMT<0b100, 0b001001>;
+
+class BZ_B_ENC : MSA_CBRANCH_FMT<0b110, 0b00>;
+class BZ_H_ENC : MSA_CBRANCH_FMT<0b110, 0b01>;
+class BZ_W_ENC : MSA_CBRANCH_FMT<0b110, 0b10>;
+class BZ_D_ENC : MSA_CBRANCH_FMT<0b110, 0b11>;
+
+class BZ_V_ENC : MSA_CBRANCH_V_FMT<0b01011>;
+
+class CEQ_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001111>;
+class CEQ_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001111>;
+class CEQ_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001111>;
+class CEQ_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001111>;
+
+class CEQI_B_ENC : MSA_I5_FMT<0b000, 0b00, 0b000111>;
+class CEQI_H_ENC : MSA_I5_FMT<0b000, 0b01, 0b000111>;
+class CEQI_W_ENC : MSA_I5_FMT<0b000, 0b10, 0b000111>;
+class CEQI_D_ENC : MSA_I5_FMT<0b000, 0b11, 0b000111>;
+
+class CFCMSA_ENC : MSA_ELM_CFCMSA_FMT<0b0001111110, 0b011001>;
+
+class CLE_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001111>;
+class CLE_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001111>;
+class CLE_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001111>;
+class CLE_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001111>;
+
+class CLE_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001111>;
+class CLE_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001111>;
+class CLE_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001111>;
+class CLE_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001111>;
+
+class CLEI_S_B_ENC : MSA_I5_FMT<0b100, 0b00, 0b000111>;
+class CLEI_S_H_ENC : MSA_I5_FMT<0b100, 0b01, 0b000111>;
+class CLEI_S_W_ENC : MSA_I5_FMT<0b100, 0b10, 0b000111>;
+class CLEI_S_D_ENC : MSA_I5_FMT<0b100, 0b11, 0b000111>;
+
+class CLEI_U_B_ENC : MSA_I5_FMT<0b101, 0b00, 0b000111>;
+class CLEI_U_H_ENC : MSA_I5_FMT<0b101, 0b01, 0b000111>;
+class CLEI_U_W_ENC : MSA_I5_FMT<0b101, 0b10, 0b000111>;
+class CLEI_U_D_ENC : MSA_I5_FMT<0b101, 0b11, 0b000111>;
+
+class CLT_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001111>;
+class CLT_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001111>;
+class CLT_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001111>;
+class CLT_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001111>;
+
+class CLT_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001111>;
+class CLT_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001111>;
+class CLT_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001111>;
+class CLT_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001111>;
+
+class CLTI_S_B_ENC : MSA_I5_FMT<0b010, 0b00, 0b000111>;
+class CLTI_S_H_ENC : MSA_I5_FMT<0b010, 0b01, 0b000111>;
+class CLTI_S_W_ENC : MSA_I5_FMT<0b010, 0b10, 0b000111>;
+class CLTI_S_D_ENC : MSA_I5_FMT<0b010, 0b11, 0b000111>;
+
+class CLTI_U_B_ENC : MSA_I5_FMT<0b011, 0b00, 0b000111>;
+class CLTI_U_H_ENC : MSA_I5_FMT<0b011, 0b01, 0b000111>;
+class CLTI_U_W_ENC : MSA_I5_FMT<0b011, 0b10, 0b000111>;
+class CLTI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000111>;
+
+class COPY_S_B_ENC : MSA_ELM_COPY_B_FMT<0b0010, 0b011001>;
+class COPY_S_H_ENC : MSA_ELM_COPY_H_FMT<0b0010, 0b011001>;
+class COPY_S_W_ENC : MSA_ELM_COPY_W_FMT<0b0010, 0b011001>;
+
+class COPY_U_B_ENC : MSA_ELM_COPY_B_FMT<0b0011, 0b011001>;
+class COPY_U_H_ENC : MSA_ELM_COPY_H_FMT<0b0011, 0b011001>;
+class COPY_U_W_ENC : MSA_ELM_COPY_W_FMT<0b0011, 0b011001>;
+
+class CTCMSA_ENC : MSA_ELM_CTCMSA_FMT<0b0000111110, 0b011001>;
+
+class DIV_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010010>;
+class DIV_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010010>;
+class DIV_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010010>;
+class DIV_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010010>;
+
+class DIV_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010010>;
+class DIV_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010010>;
+class DIV_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010010>;
+class DIV_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010010>;
+
+class DOTP_S_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010011>;
+class DOTP_S_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010011>;
+class DOTP_S_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010011>;
+
+class DOTP_U_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010011>;
+class DOTP_U_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010011>;
+class DOTP_U_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010011>;
+
+class DPADD_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010011>;
+class DPADD_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010011>;
+class DPADD_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010011>;
+
+class DPADD_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010011>;
+class DPADD_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010011>;
+class DPADD_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010011>;
+
+class DPSUB_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010011>;
+class DPSUB_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010011>;
+class DPSUB_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010011>;
+
+class DPSUB_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010011>;
+class DPSUB_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010011>;
+class DPSUB_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010011>;
+
+class FADD_W_ENC : MSA_3RF_FMT<0b0000, 0b0, 0b011011>;
+class FADD_D_ENC : MSA_3RF_FMT<0b0000, 0b1, 0b011011>;
+
+class FCAF_W_ENC : MSA_3RF_FMT<0b0000, 0b0, 0b011010>;
+class FCAF_D_ENC : MSA_3RF_FMT<0b0000, 0b1, 0b011010>;
+
+class FCEQ_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011010>;
+class FCEQ_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011010>;
+
+class FCLASS_W_ENC : MSA_2RF_FMT<0b110010000, 0b0, 0b011110>;
+class FCLASS_D_ENC : MSA_2RF_FMT<0b110010000, 0b1, 0b011110>;
+
+class FCLE_W_ENC : MSA_3RF_FMT<0b0110, 0b0, 0b011010>;
+class FCLE_D_ENC : MSA_3RF_FMT<0b0110, 0b1, 0b011010>;
+
+class FCLT_W_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011010>;
+class FCLT_D_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011010>;
+
+class FCNE_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011100>;
+class FCNE_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011100>;
+
+class FCOR_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011100>;
+class FCOR_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011100>;
+
+class FCUEQ_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011010>;
+class FCUEQ_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011010>;
+
+class FCULE_W_ENC : MSA_3RF_FMT<0b0111, 0b0, 0b011010>;
+class FCULE_D_ENC : MSA_3RF_FMT<0b0111, 0b1, 0b011010>;
+
+class FCULT_W_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011010>;
+class FCULT_D_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011010>;
+
+class FCUN_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011010>;
+class FCUN_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011010>;
+
+class FCUNE_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011100>;
+class FCUNE_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011100>;
+
+class FDIV_W_ENC : MSA_3RF_FMT<0b0011, 0b0, 0b011011>;
+class FDIV_D_ENC : MSA_3RF_FMT<0b0011, 0b1, 0b011011>;
+
+class FEXDO_H_ENC : MSA_3RF_FMT<0b1000, 0b0, 0b011011>;
+class FEXDO_W_ENC : MSA_3RF_FMT<0b1000, 0b1, 0b011011>;
+
+class FEXP2_W_ENC : MSA_3RF_FMT<0b0111, 0b0, 0b011011>;
+class FEXP2_D_ENC : MSA_3RF_FMT<0b0111, 0b1, 0b011011>;
+
+class FEXUPL_W_ENC : MSA_2RF_FMT<0b110011000, 0b0, 0b011110>;
+class FEXUPL_D_ENC : MSA_2RF_FMT<0b110011000, 0b1, 0b011110>;
+
+class FEXUPR_W_ENC : MSA_2RF_FMT<0b110011001, 0b0, 0b011110>;
+class FEXUPR_D_ENC : MSA_2RF_FMT<0b110011001, 0b1, 0b011110>;
+
+class FFINT_S_W_ENC : MSA_2RF_FMT<0b110011110, 0b0, 0b011110>;
+class FFINT_S_D_ENC : MSA_2RF_FMT<0b110011110, 0b1, 0b011110>;
+
+class FFINT_U_W_ENC : MSA_2RF_FMT<0b110011111, 0b0, 0b011110>;
+class FFINT_U_D_ENC : MSA_2RF_FMT<0b110011111, 0b1, 0b011110>;
+
+class FFQL_W_ENC : MSA_2RF_FMT<0b110011010, 0b0, 0b011110>;
+class FFQL_D_ENC : MSA_2RF_FMT<0b110011010, 0b1, 0b011110>;
+
+class FFQR_W_ENC : MSA_2RF_FMT<0b110011011, 0b0, 0b011110>;
+class FFQR_D_ENC : MSA_2RF_FMT<0b110011011, 0b1, 0b011110>;
+
+class FILL_B_ENC : MSA_2R_FILL_FMT<0b11000000, 0b00, 0b011110>;
+class FILL_H_ENC : MSA_2R_FILL_FMT<0b11000000, 0b01, 0b011110>;
+class FILL_W_ENC : MSA_2R_FILL_FMT<0b11000000, 0b10, 0b011110>;
+
+class FLOG2_W_ENC : MSA_2RF_FMT<0b110010111, 0b0, 0b011110>;
+class FLOG2_D_ENC : MSA_2RF_FMT<0b110010111, 0b1, 0b011110>;
+
+class FMADD_W_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011011>;
+class FMADD_D_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011011>;
+
+class FMAX_W_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011011>;
+class FMAX_D_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011011>;
+
+class FMAX_A_W_ENC : MSA_3RF_FMT<0b1111, 0b0, 0b011011>;
+class FMAX_A_D_ENC : MSA_3RF_FMT<0b1111, 0b1, 0b011011>;
+
+class FMIN_W_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011011>;
+class FMIN_D_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011011>;
+
+class FMIN_A_W_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011011>;
+class FMIN_A_D_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011011>;
+
+class FMSUB_W_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011011>;
+class FMSUB_D_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011011>;
+
+class FMUL_W_ENC : MSA_3RF_FMT<0b0010, 0b0, 0b011011>;
+class FMUL_D_ENC : MSA_3RF_FMT<0b0010, 0b1, 0b011011>;
+
+class FRINT_W_ENC : MSA_2RF_FMT<0b110010110, 0b0, 0b011110>;
+class FRINT_D_ENC : MSA_2RF_FMT<0b110010110, 0b1, 0b011110>;
+
+class FRCP_W_ENC : MSA_2RF_FMT<0b110010101, 0b0, 0b011110>;
+class FRCP_D_ENC : MSA_2RF_FMT<0b110010101, 0b1, 0b011110>;
+
+class FRSQRT_W_ENC : MSA_2RF_FMT<0b110010100, 0b0, 0b011110>;
+class FRSQRT_D_ENC : MSA_2RF_FMT<0b110010100, 0b1, 0b011110>;
+
+class FSAF_W_ENC : MSA_3RF_FMT<0b1000, 0b0, 0b011010>;
+class FSAF_D_ENC : MSA_3RF_FMT<0b1000, 0b1, 0b011010>;
+
+class FSEQ_W_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011010>;
+class FSEQ_D_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011010>;
+
+class FSLE_W_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011010>;
+class FSLE_D_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011010>;
+
+class FSLT_W_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011010>;
+class FSLT_D_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011010>;
+
+class FSNE_W_ENC : MSA_3RF_FMT<0b1011, 0b0, 0b011100>;
+class FSNE_D_ENC : MSA_3RF_FMT<0b1011, 0b1, 0b011100>;
+
+class FSOR_W_ENC : MSA_3RF_FMT<0b1001, 0b0, 0b011100>;
+class FSOR_D_ENC : MSA_3RF_FMT<0b1001, 0b1, 0b011100>;
+
+class FSQRT_W_ENC : MSA_2RF_FMT<0b110010011, 0b0, 0b011110>;
+class FSQRT_D_ENC : MSA_2RF_FMT<0b110010011, 0b1, 0b011110>;
+
+class FSUB_W_ENC : MSA_3RF_FMT<0b0001, 0b0, 0b011011>;
+class FSUB_D_ENC : MSA_3RF_FMT<0b0001, 0b1, 0b011011>;
+
+class FSUEQ_W_ENC : MSA_3RF_FMT<0b1011, 0b0, 0b011010>;
+class FSUEQ_D_ENC : MSA_3RF_FMT<0b1011, 0b1, 0b011010>;
+
+class FSULE_W_ENC : MSA_3RF_FMT<0b1111, 0b0, 0b011010>;
+class FSULE_D_ENC : MSA_3RF_FMT<0b1111, 0b1, 0b011010>;
+
+class FSULT_W_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011010>;
+class FSULT_D_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011010>;
+
+class FSUN_W_ENC : MSA_3RF_FMT<0b1001, 0b0, 0b011010>;
+class FSUN_D_ENC : MSA_3RF_FMT<0b1001, 0b1, 0b011010>;
+
+class FSUNE_W_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011100>;
+class FSUNE_D_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011100>;
+
+class FTINT_S_W_ENC : MSA_2RF_FMT<0b110011100, 0b0, 0b011110>;
+class FTINT_S_D_ENC : MSA_2RF_FMT<0b110011100, 0b1, 0b011110>;
+
+class FTINT_U_W_ENC : MSA_2RF_FMT<0b110011101, 0b0, 0b011110>;
+class FTINT_U_D_ENC : MSA_2RF_FMT<0b110011101, 0b1, 0b011110>;
+
+class FTQ_H_ENC : MSA_3RF_FMT<0b1010, 0b0, 0b011011>;
+class FTQ_W_ENC : MSA_3RF_FMT<0b1010, 0b1, 0b011011>;
+
+class FTRUNC_S_W_ENC : MSA_2RF_FMT<0b110010001, 0b0, 0b011110>;
+class FTRUNC_S_D_ENC : MSA_2RF_FMT<0b110010001, 0b1, 0b011110>;
+
+class FTRUNC_U_W_ENC : MSA_2RF_FMT<0b110010010, 0b0, 0b011110>;
+class FTRUNC_U_D_ENC : MSA_2RF_FMT<0b110010010, 0b1, 0b011110>;
+
+class HADD_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010101>;
+class HADD_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010101>;
+class HADD_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010101>;
+
+class HADD_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010101>;
+class HADD_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010101>;
+class HADD_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010101>;
+
+class HSUB_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010101>;
+class HSUB_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010101>;
+class HSUB_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010101>;
+
+class HSUB_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010101>;
+class HSUB_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010101>;
+class HSUB_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010101>;
+
+class ILVEV_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010100>;
+class ILVEV_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010100>;
+class ILVEV_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010100>;
+class ILVEV_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010100>;
+
+class ILVL_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b010100>;
+class ILVL_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b010100>;
+class ILVL_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b010100>;
+class ILVL_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b010100>;
+
+class ILVOD_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010100>;
+class ILVOD_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010100>;
+class ILVOD_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010100>;
+class ILVOD_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010100>;
+
+class ILVR_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b010100>;
+class ILVR_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b010100>;
+class ILVR_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b010100>;
+class ILVR_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b010100>;
+
+class INSERT_B_ENC : MSA_ELM_INSERT_B_FMT<0b0100, 0b011001>;
+class INSERT_H_ENC : MSA_ELM_INSERT_H_FMT<0b0100, 0b011001>;
+class INSERT_W_ENC : MSA_ELM_INSERT_W_FMT<0b0100, 0b011001>;
+
+class INSVE_B_ENC : MSA_ELM_B_FMT<0b0101, 0b011001>;
+class INSVE_H_ENC : MSA_ELM_H_FMT<0b0101, 0b011001>;
+class INSVE_W_ENC : MSA_ELM_W_FMT<0b0101, 0b011001>;
+class INSVE_D_ENC : MSA_ELM_D_FMT<0b0101, 0b011001>;
+
+class LD_B_ENC : MSA_MI10_FMT<0b00, 0b1000>;
+class LD_H_ENC : MSA_MI10_FMT<0b01, 0b1000>;
+class LD_W_ENC : MSA_MI10_FMT<0b10, 0b1000>;
+class LD_D_ENC : MSA_MI10_FMT<0b11, 0b1000>;
+
+class LDI_B_ENC : MSA_I10_FMT<0b110, 0b00, 0b000111>;
+class LDI_H_ENC : MSA_I10_FMT<0b110, 0b01, 0b000111>;
+class LDI_W_ENC : MSA_I10_FMT<0b110, 0b10, 0b000111>;
+class LDI_D_ENC : MSA_I10_FMT<0b110, 0b11, 0b000111>;
+
+class LSA_ENC : SPECIAL_LSA_FMT<0b000101>;
+
+class MADD_Q_H_ENC : MSA_3RF_FMT<0b0101, 0b0, 0b011100>;
+class MADD_Q_W_ENC : MSA_3RF_FMT<0b0101, 0b1, 0b011100>;
+
+class MADDR_Q_H_ENC : MSA_3RF_FMT<0b1101, 0b0, 0b011100>;
+class MADDR_Q_W_ENC : MSA_3RF_FMT<0b1101, 0b1, 0b011100>;
+
+class MADDV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010010>;
+class MADDV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010010>;
+class MADDV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010010>;
+class MADDV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010010>;
+
+class MAX_A_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b001110>;
+class MAX_A_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b001110>;
+class MAX_A_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b001110>;
+class MAX_A_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b001110>;
+
+class MAX_S_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001110>;
+class MAX_S_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001110>;
+class MAX_S_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001110>;
+class MAX_S_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001110>;
+
+class MAX_U_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b001110>;
+class MAX_U_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b001110>;
+class MAX_U_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b001110>;
+class MAX_U_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b001110>;
+
+class MAXI_S_B_ENC : MSA_I5_FMT<0b010, 0b00, 0b000110>;
+class MAXI_S_H_ENC : MSA_I5_FMT<0b010, 0b01, 0b000110>;
+class MAXI_S_W_ENC : MSA_I5_FMT<0b010, 0b10, 0b000110>;
+class MAXI_S_D_ENC : MSA_I5_FMT<0b010, 0b11, 0b000110>;
+
+class MAXI_U_B_ENC : MSA_I5_FMT<0b011, 0b00, 0b000110>;
+class MAXI_U_H_ENC : MSA_I5_FMT<0b011, 0b01, 0b000110>;
+class MAXI_U_W_ENC : MSA_I5_FMT<0b011, 0b10, 0b000110>;
+class MAXI_U_D_ENC : MSA_I5_FMT<0b011, 0b11, 0b000110>;
+
+class MIN_A_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b001110>;
+class MIN_A_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b001110>;
+class MIN_A_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b001110>;
+class MIN_A_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b001110>;
+
+class MIN_S_B_ENC : MSA_3R_FMT<0b100, 0b00, 0b001110>;
+class MIN_S_H_ENC : MSA_3R_FMT<0b100, 0b01, 0b001110>;
+class MIN_S_W_ENC : MSA_3R_FMT<0b100, 0b10, 0b001110>;
+class MIN_S_D_ENC : MSA_3R_FMT<0b100, 0b11, 0b001110>;
+
+class MIN_U_B_ENC : MSA_3R_FMT<0b101, 0b00, 0b001110>;
+class MIN_U_H_ENC : MSA_3R_FMT<0b101, 0b01, 0b001110>;
+class MIN_U_W_ENC : MSA_3R_FMT<0b101, 0b10, 0b001110>;
+class MIN_U_D_ENC : MSA_3R_FMT<0b101, 0b11, 0b001110>;
+
+class MINI_S_B_ENC : MSA_I5_FMT<0b100, 0b00, 0b000110>;
+class MINI_S_H_ENC : MSA_I5_FMT<0b100, 0b01, 0b000110>;
+class MINI_S_W_ENC : MSA_I5_FMT<0b100, 0b10, 0b000110>;
+class MINI_S_D_ENC : MSA_I5_FMT<0b100, 0b11, 0b000110>;
+
+class MINI_U_B_ENC : MSA_I5_FMT<0b101, 0b00, 0b000110>;
+class MINI_U_H_ENC : MSA_I5_FMT<0b101, 0b01, 0b000110>;
+class MINI_U_W_ENC : MSA_I5_FMT<0b101, 0b10, 0b000110>;
+class MINI_U_D_ENC : MSA_I5_FMT<0b101, 0b11, 0b000110>;
+
+class MOD_S_B_ENC : MSA_3R_FMT<0b110, 0b00, 0b010010>;
+class MOD_S_H_ENC : MSA_3R_FMT<0b110, 0b01, 0b010010>;
+class MOD_S_W_ENC : MSA_3R_FMT<0b110, 0b10, 0b010010>;
+class MOD_S_D_ENC : MSA_3R_FMT<0b110, 0b11, 0b010010>;
+
+class MOD_U_B_ENC : MSA_3R_FMT<0b111, 0b00, 0b010010>;
+class MOD_U_H_ENC : MSA_3R_FMT<0b111, 0b01, 0b010010>;
+class MOD_U_W_ENC : MSA_3R_FMT<0b111, 0b10, 0b010010>;
+class MOD_U_D_ENC : MSA_3R_FMT<0b111, 0b11, 0b010010>;
+
+class MOVE_V_ENC : MSA_ELM_FMT<0b0010111110, 0b011001>;
+
+class MSUB_Q_H_ENC : MSA_3RF_FMT<0b0110, 0b0, 0b011100>;
+class MSUB_Q_W_ENC : MSA_3RF_FMT<0b0110, 0b1, 0b011100>;
+
+class MSUBR_Q_H_ENC : MSA_3RF_FMT<0b1110, 0b0, 0b011100>;
+class MSUBR_Q_W_ENC : MSA_3RF_FMT<0b1110, 0b1, 0b011100>;
+
+class MSUBV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010010>;
+class MSUBV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010010>;
+class MSUBV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010010>;
+class MSUBV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010010>;
+
+class MUL_Q_H_ENC : MSA_3RF_FMT<0b0100, 0b0, 0b011100>;
+class MUL_Q_W_ENC : MSA_3RF_FMT<0b0100, 0b1, 0b011100>;
+
+class MULR_Q_H_ENC : MSA_3RF_FMT<0b1100, 0b0, 0b011100>;
+class MULR_Q_W_ENC : MSA_3RF_FMT<0b1100, 0b1, 0b011100>;
+
+class MULV_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010010>;
+class MULV_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010010>;
+class MULV_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010010>;
+class MULV_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010010>;
+
+class NLOC_B_ENC : MSA_2R_FMT<0b11000010, 0b00, 0b011110>;
+class NLOC_H_ENC : MSA_2R_FMT<0b11000010, 0b01, 0b011110>;
+class NLOC_W_ENC : MSA_2R_FMT<0b11000010, 0b10, 0b011110>;
+class NLOC_D_ENC : MSA_2R_FMT<0b11000010, 0b11, 0b011110>;
+
+class NLZC_B_ENC : MSA_2R_FMT<0b11000011, 0b00, 0b011110>;
+class NLZC_H_ENC : MSA_2R_FMT<0b11000011, 0b01, 0b011110>;
+class NLZC_W_ENC : MSA_2R_FMT<0b11000011, 0b10, 0b011110>;
+class NLZC_D_ENC : MSA_2R_FMT<0b11000011, 0b11, 0b011110>;
+
+class NOR_V_ENC : MSA_VEC_FMT<0b00010, 0b011110>;
+
+class NORI_B_ENC : MSA_I8_FMT<0b10, 0b000000>;
+
+class OR_V_ENC : MSA_VEC_FMT<0b00001, 0b011110>;
+
+class ORI_B_ENC : MSA_I8_FMT<0b01, 0b000000>;
+
+class PCKEV_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010100>;
+class PCKEV_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010100>;
+class PCKEV_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010100>;
+class PCKEV_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010100>;
+
+class PCKOD_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010100>;
+class PCKOD_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010100>;
+class PCKOD_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010100>;
+class PCKOD_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010100>;
+
+class PCNT_B_ENC : MSA_2R_FMT<0b11000001, 0b00, 0b011110>;
+class PCNT_H_ENC : MSA_2R_FMT<0b11000001, 0b01, 0b011110>;
+class PCNT_W_ENC : MSA_2R_FMT<0b11000001, 0b10, 0b011110>;
+class PCNT_D_ENC : MSA_2R_FMT<0b11000001, 0b11, 0b011110>;
+
+class SAT_S_B_ENC : MSA_BIT_B_FMT<0b000, 0b001010>;
+class SAT_S_H_ENC : MSA_BIT_H_FMT<0b000, 0b001010>;
+class SAT_S_W_ENC : MSA_BIT_W_FMT<0b000, 0b001010>;
+class SAT_S_D_ENC : MSA_BIT_D_FMT<0b000, 0b001010>;
+
+class SAT_U_B_ENC : MSA_BIT_B_FMT<0b001, 0b001010>;
+class SAT_U_H_ENC : MSA_BIT_H_FMT<0b001, 0b001010>;
+class SAT_U_W_ENC : MSA_BIT_W_FMT<0b001, 0b001010>;
+class SAT_U_D_ENC : MSA_BIT_D_FMT<0b001, 0b001010>;
+
+class SHF_B_ENC : MSA_I8_FMT<0b00, 0b000010>;
+class SHF_H_ENC : MSA_I8_FMT<0b01, 0b000010>;
+class SHF_W_ENC : MSA_I8_FMT<0b10, 0b000010>;
+
+class SLD_B_ENC : MSA_3R_INDEX_FMT<0b000, 0b00, 0b010100>;
+class SLD_H_ENC : MSA_3R_INDEX_FMT<0b000, 0b01, 0b010100>;
+class SLD_W_ENC : MSA_3R_INDEX_FMT<0b000, 0b10, 0b010100>;
+class SLD_D_ENC : MSA_3R_INDEX_FMT<0b000, 0b11, 0b010100>;
+
+class SLDI_B_ENC : MSA_ELM_B_FMT<0b0000, 0b011001>;
+class SLDI_H_ENC : MSA_ELM_H_FMT<0b0000, 0b011001>;
+class SLDI_W_ENC : MSA_ELM_W_FMT<0b0000, 0b011001>;
+class SLDI_D_ENC : MSA_ELM_D_FMT<0b0000, 0b011001>;
+
+class SLL_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b001101>;
+class SLL_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b001101>;
+class SLL_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b001101>;
+class SLL_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b001101>;
+
+class SLLI_B_ENC : MSA_BIT_B_FMT<0b000, 0b001001>;
+class SLLI_H_ENC : MSA_BIT_H_FMT<0b000, 0b001001>;
+class SLLI_W_ENC : MSA_BIT_W_FMT<0b000, 0b001001>;
+class SLLI_D_ENC : MSA_BIT_D_FMT<0b000, 0b001001>;
+
+class SPLAT_B_ENC : MSA_3R_INDEX_FMT<0b001, 0b00, 0b010100>;
+class SPLAT_H_ENC : MSA_3R_INDEX_FMT<0b001, 0b01, 0b010100>;
+class SPLAT_W_ENC : MSA_3R_INDEX_FMT<0b001, 0b10, 0b010100>;
+class SPLAT_D_ENC : MSA_3R_INDEX_FMT<0b001, 0b11, 0b010100>;
+
+class SPLATI_B_ENC : MSA_ELM_B_FMT<0b0001, 0b011001>;
+class SPLATI_H_ENC : MSA_ELM_H_FMT<0b0001, 0b011001>;
+class SPLATI_W_ENC : MSA_ELM_W_FMT<0b0001, 0b011001>;
+class SPLATI_D_ENC : MSA_ELM_D_FMT<0b0001, 0b011001>;
+
+class SRA_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001101>;
+class SRA_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001101>;
+class SRA_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001101>;
+class SRA_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001101>;
+
+class SRAI_B_ENC : MSA_BIT_B_FMT<0b001, 0b001001>;
+class SRAI_H_ENC : MSA_BIT_H_FMT<0b001, 0b001001>;
+class SRAI_W_ENC : MSA_BIT_W_FMT<0b001, 0b001001>;
+class SRAI_D_ENC : MSA_BIT_D_FMT<0b001, 0b001001>;
+
+class SRAR_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010101>;
+class SRAR_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010101>;
+class SRAR_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010101>;
+class SRAR_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010101>;
+
+class SRARI_B_ENC : MSA_BIT_B_FMT<0b010, 0b001010>;
+class SRARI_H_ENC : MSA_BIT_H_FMT<0b010, 0b001010>;
+class SRARI_W_ENC : MSA_BIT_W_FMT<0b010, 0b001010>;
+class SRARI_D_ENC : MSA_BIT_D_FMT<0b010, 0b001010>;
+
+class SRL_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b001101>;
+class SRL_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b001101>;
+class SRL_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b001101>;
+class SRL_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b001101>;
+
+class SRLI_B_ENC : MSA_BIT_B_FMT<0b010, 0b001001>;
+class SRLI_H_ENC : MSA_BIT_H_FMT<0b010, 0b001001>;
+class SRLI_W_ENC : MSA_BIT_W_FMT<0b010, 0b001001>;
+class SRLI_D_ENC : MSA_BIT_D_FMT<0b010, 0b001001>;
+
+class SRLR_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010101>;
+class SRLR_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010101>;
+class SRLR_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010101>;
+class SRLR_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010101>;
+
+class SRLRI_B_ENC : MSA_BIT_B_FMT<0b011, 0b001010>;
+class SRLRI_H_ENC : MSA_BIT_H_FMT<0b011, 0b001010>;
+class SRLRI_W_ENC : MSA_BIT_W_FMT<0b011, 0b001010>;
+class SRLRI_D_ENC : MSA_BIT_D_FMT<0b011, 0b001010>;
+
+class ST_B_ENC : MSA_MI10_FMT<0b00, 0b1001>;
+class ST_H_ENC : MSA_MI10_FMT<0b01, 0b1001>;
+class ST_W_ENC : MSA_MI10_FMT<0b10, 0b1001>;
+class ST_D_ENC : MSA_MI10_FMT<0b11, 0b1001>;
+
+class SUBS_S_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010001>;
+class SUBS_S_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010001>;
+class SUBS_S_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010001>;
+class SUBS_S_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010001>;
+
+class SUBS_U_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b010001>;
+class SUBS_U_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b010001>;
+class SUBS_U_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b010001>;
+class SUBS_U_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b010001>;
+
+class SUBSUS_U_B_ENC : MSA_3R_FMT<0b010, 0b00, 0b010001>;
+class SUBSUS_U_H_ENC : MSA_3R_FMT<0b010, 0b01, 0b010001>;
+class SUBSUS_U_W_ENC : MSA_3R_FMT<0b010, 0b10, 0b010001>;
+class SUBSUS_U_D_ENC : MSA_3R_FMT<0b010, 0b11, 0b010001>;
+
+class SUBSUU_S_B_ENC : MSA_3R_FMT<0b011, 0b00, 0b010001>;
+class SUBSUU_S_H_ENC : MSA_3R_FMT<0b011, 0b01, 0b010001>;
+class SUBSUU_S_W_ENC : MSA_3R_FMT<0b011, 0b10, 0b010001>;
+class SUBSUU_S_D_ENC : MSA_3R_FMT<0b011, 0b11, 0b010001>;
+
+class SUBV_B_ENC : MSA_3R_FMT<0b001, 0b00, 0b001110>;
+class SUBV_H_ENC : MSA_3R_FMT<0b001, 0b01, 0b001110>;
+class SUBV_W_ENC : MSA_3R_FMT<0b001, 0b10, 0b001110>;
+class SUBV_D_ENC : MSA_3R_FMT<0b001, 0b11, 0b001110>;
+
+class SUBVI_B_ENC : MSA_I5_FMT<0b001, 0b00, 0b000110>;
+class SUBVI_H_ENC : MSA_I5_FMT<0b001, 0b01, 0b000110>;
+class SUBVI_W_ENC : MSA_I5_FMT<0b001, 0b10, 0b000110>;
+class SUBVI_D_ENC : MSA_I5_FMT<0b001, 0b11, 0b000110>;
+
+class VSHF_B_ENC : MSA_3R_FMT<0b000, 0b00, 0b010101>;
+class VSHF_H_ENC : MSA_3R_FMT<0b000, 0b01, 0b010101>;
+class VSHF_W_ENC : MSA_3R_FMT<0b000, 0b10, 0b010101>;
+class VSHF_D_ENC : MSA_3R_FMT<0b000, 0b11, 0b010101>;
+
+class XOR_V_ENC : MSA_VEC_FMT<0b00011, 0b011110>;
+
+class XORI_B_ENC : MSA_I8_FMT<0b11, 0b000000>;
+
+// Instruction desc.
+class MSA_BIT_B_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm3:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_H_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm4:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_W_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm5:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_D_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ComplexPattern Imm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, vsplat_uimm6:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, Imm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_B_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm3:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt3:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_H_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt4:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_W_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm5:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt5:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed soon.
+class MSA_BIT_D_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm6:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt6:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_BIT_BINSXI_DESC_BASE<string instr_asm, ValueType Ty,
+ ComplexPattern Mask, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, vsplat_uimm8:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (vselect (Ty Mask:$m), (Ty ROWD:$wd_in),
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_BIT_BINSLI_DESC_BASE<string instr_asm, ValueType Ty,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskl_bits, ROWD, ROWS, itin>;
+
+class MSA_BIT_BINSRI_DESC_BASE<string instr_asm, ValueType Ty,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_BIT_BINSXI_DESC_BASE<instr_asm, Ty, vsplat_maskr_bits, ROWD, ROWS, itin>;
+
+class MSA_BIT_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$m);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $m");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$m))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_COPY_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType VecTy, RegisterOperand ROD,
+ RegisterOperand ROWS,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROD:$rd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$n);
+ string AsmString = !strconcat(instr_asm, "\t$rd, $ws[$n]");
+ list<dag> Pattern = [(set ROD:$rd, (OpNode (VecTy ROWS:$ws), immZExt4:$n))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_ELM_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm4:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$n]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt4:$n))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_COPY_PSEUDO_BASE<SDPatternOperator OpNode, ValueType VecTy,
+ RegisterClass RCD, RegisterClass RCWS> :
+ MipsPseudo<(outs RCD:$wd), (ins RCWS:$ws, uimm4:$n),
+ [(set RCD:$wd, (OpNode (VecTy RCWS:$ws), immZExt4:$n))]> {
+ bit usesCustomInserter = 1;
+}
+
+class MSA_I5_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$imm);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $imm");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$imm))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I8_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ SplatComplexPattern SplatImm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, SplatImm:$u8))];
+ InstrItinClass Itinerary = itin;
+}
+
+// This class is deprecated and will be removed in the next few patches.
+class MSA_I8_X_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, immZExt8:$u8))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I8_SHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, uimm8:$u8);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $u8");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsSHF immZExt8:$u8, ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_I10_LDI_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins vsplat_simm10:$s10);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $s10");
+ // LDI is matched using custom matching code in MipsSEISelDAGToDAG.cpp
+ list<dag> Pattern = [];
+ bit hasSideEffects = 0;
+ InstrItinClass Itinerary = itin;
+}
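The empty pattern list and the comment above say that ldi.df is selected by hand-written code rather than by a TableGen pattern. As a rough, hypothetical sketch of that kind of matching (illustrative helper and opcode names only, not the code actually imported here), a selector could look for a BUILD_VECTOR that splats one constant fitting the 10-bit signed immediate:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Hypothetical splat matcher: if Node is a BUILD_VECTOR that splats one small
// signed constant, select an ldi machine node for it. Mips::LDI_B stands in
// for whichever per-element-size opcode the real selector would pick.
static SDNode *selectSplatLDI(SelectionDAG *CurDAG, SDNode *Node,
                              bool IsLittleEndian) {
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Node);
  if (!BVN)
    return 0;

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                            8, !IsLittleEndian))
    return 0;

  // ldi.df only encodes a 10-bit signed immediate (the $s10 operand above).
  int64_t Imm = SplatValue.getSExtValue();
  if (!isInt<10>(Imm))
    return 0;

  SDValue ImmOp = CurDAG->getTargetConstant(Imm, MVT::i32);
  return CurDAG->getMachineNode(Mips::LDI_B, SDLoc(Node),
                                Node->getValueType(0), ImmOp);
}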
+
+class MSA_2R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_2R_FILL_DESC_BASE<string instr_asm, ValueType VT,
+ SDPatternOperator OpNode, RegisterOperand ROWD,
+ RegisterOperand ROS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROS:$rs);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $rs");
+ list<dag> Pattern = [(set ROWD:$wd, (VT (OpNode ROS:$rs)))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_2R_FILL_PSEUDO_BASE<ValueType VT, SDPatternOperator OpNode,
+ RegisterClass RCWD, RegisterClass RCWS = RCWD> :
+ MipsPseudo<(outs RCWD:$wd), (ins RCWS:$fs),
+ [(set RCWD:$wd, (OpNode RCWS:$fs))]> {
+ let usesCustomInserter = 1;
+}
+
+class MSA_2RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_BINSX_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in, ROWS:$ws,
+ ROWT:$wt))];
+ string Constraints = "$wd = $wd_in";
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_SPLAT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_VSHF_DESC_BASE<string instr_asm, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsVSHF ROWD:$wd_in, ROWS:$ws,
+ ROWT:$wt))];
+ string Constraints = "$wd = $wd_in";
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_SLD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, GPR32:$rt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$rt]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, GPR32:$rt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_3R_4R_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd,
+ (OpNode ROWD:$wd_in, ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_3RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_3R_DESC_BASE<instr_asm, OpNode, ROWD, ROWS, ROWT, itin>;
+
+class MSA_3RF_4RF_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MSA_3R_4R_DESC_BASE<instr_asm, OpNode, ROWD, ROWS, ROWT, itin>;
+
+class MSA_CBRANCH_DESC_BASE<string instr_asm, RegisterOperand ROWD> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins ROWD:$wt, brtarget:$offset);
+ string AsmString = !strconcat(instr_asm, "\t$wt, $offset");
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = IIBranch;
+ bit isBranch = 1;
+ bit isTerminator = 1;
+ bit hasDelaySlot = 1;
+ list<Register> Defs = [AT];
+}
+
+class MSA_INSERT_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROS,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, ROS:$rs, uimm6:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd[$n], $rs");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
+ ROS:$rs,
+ immZExt6:$n))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_INSERT_PSEUDO_BASE<SDPatternOperator OpNode, ValueType Ty,
+ RegisterOperand ROWD, RegisterOperand ROFS> :
+ MipsPseudo<(outs ROWD:$wd), (ins ROWD:$wd_in, uimm6:$n, ROFS:$fs),
+ [(set ROWD:$wd, (OpNode (Ty ROWD:$wd_in), ROFS:$fs,
+ immZExt6:$n))]> {
+ bit usesCustomInserter = 1;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_INSVE_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWD:$wd_in, uimm6:$n, ROWS:$ws);
+ string AsmString = !strconcat(instr_asm, "\t$wd[$n], $ws[0]");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWD:$wd_in,
+ immZExt6:$n,
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+ string Constraints = "$wd = $wd_in";
+}
+
+class MSA_VEC_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ RegisterOperand ROWD, RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, ROWT:$wt);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws, $wt");
+ list<dag> Pattern = [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_ELM_SPLAT_DESC_BASE<string instr_asm, SplatComplexPattern SplatImm,
+ RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins ROWS:$ws, SplatImm.OpClass:$n);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $ws[$n]");
+ list<dag> Pattern = [(set ROWD:$wd, (MipsVSHF SplatImm:$n, ROWS:$ws,
+ ROWS:$ws))];
+ InstrItinClass Itinerary = itin;
+}
+
+class MSA_VEC_PSEUDO_BASE<SDPatternOperator OpNode, RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ RegisterOperand ROWT = ROWD> :
+ MipsPseudo<(outs ROWD:$wd), (ins ROWS:$ws, ROWT:$wt),
+ [(set ROWD:$wd, (OpNode ROWS:$ws, ROWT:$wt))]>;
+
+class ADD_A_B_DESC : MSA_3R_DESC_BASE<"add_a.b", int_mips_add_a_b, MSA128BOpnd>,
+ IsCommutable;
+class ADD_A_H_DESC : MSA_3R_DESC_BASE<"add_a.h", int_mips_add_a_h, MSA128HOpnd>,
+ IsCommutable;
+class ADD_A_W_DESC : MSA_3R_DESC_BASE<"add_a.w", int_mips_add_a_w, MSA128WOpnd>,
+ IsCommutable;
+class ADD_A_D_DESC : MSA_3R_DESC_BASE<"add_a.d", int_mips_add_a_d, MSA128DOpnd>,
+ IsCommutable;
+
+class ADDS_A_B_DESC : MSA_3R_DESC_BASE<"adds_a.b", int_mips_adds_a_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_A_H_DESC : MSA_3R_DESC_BASE<"adds_a.h", int_mips_adds_a_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_A_W_DESC : MSA_3R_DESC_BASE<"adds_a.w", int_mips_adds_a_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_A_D_DESC : MSA_3R_DESC_BASE<"adds_a.d", int_mips_adds_a_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDS_S_B_DESC : MSA_3R_DESC_BASE<"adds_s.b", int_mips_adds_s_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_S_H_DESC : MSA_3R_DESC_BASE<"adds_s.h", int_mips_adds_s_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_S_W_DESC : MSA_3R_DESC_BASE<"adds_s.w", int_mips_adds_s_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_S_D_DESC : MSA_3R_DESC_BASE<"adds_s.d", int_mips_adds_s_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDS_U_B_DESC : MSA_3R_DESC_BASE<"adds_u.b", int_mips_adds_u_b,
+ MSA128BOpnd>, IsCommutable;
+class ADDS_U_H_DESC : MSA_3R_DESC_BASE<"adds_u.h", int_mips_adds_u_h,
+ MSA128HOpnd>, IsCommutable;
+class ADDS_U_W_DESC : MSA_3R_DESC_BASE<"adds_u.w", int_mips_adds_u_w,
+ MSA128WOpnd>, IsCommutable;
+class ADDS_U_D_DESC : MSA_3R_DESC_BASE<"adds_u.d", int_mips_adds_u_d,
+ MSA128DOpnd>, IsCommutable;
+
+class ADDV_B_DESC : MSA_3R_DESC_BASE<"addv.b", add, MSA128BOpnd>, IsCommutable;
+class ADDV_H_DESC : MSA_3R_DESC_BASE<"addv.h", add, MSA128HOpnd>, IsCommutable;
+class ADDV_W_DESC : MSA_3R_DESC_BASE<"addv.w", add, MSA128WOpnd>, IsCommutable;
+class ADDV_D_DESC : MSA_3R_DESC_BASE<"addv.d", add, MSA128DOpnd>, IsCommutable;
+
+class ADDVI_B_DESC : MSA_I5_DESC_BASE<"addvi.b", add, vsplati8_uimm5,
+ MSA128BOpnd>;
+class ADDVI_H_DESC : MSA_I5_DESC_BASE<"addvi.h", add, vsplati16_uimm5,
+ MSA128HOpnd>;
+class ADDVI_W_DESC : MSA_I5_DESC_BASE<"addvi.w", add, vsplati32_uimm5,
+ MSA128WOpnd>;
+class ADDVI_D_DESC : MSA_I5_DESC_BASE<"addvi.d", add, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class AND_V_DESC : MSA_VEC_DESC_BASE<"and.v", and, MSA128BOpnd>;
+class AND_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128HOpnd>;
+class AND_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128WOpnd>;
+class AND_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<and, MSA128DOpnd>;
+
+class ANDI_B_DESC : MSA_I8_DESC_BASE<"andi.b", and, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+class ASUB_S_B_DESC : MSA_3R_DESC_BASE<"asub_s.b", int_mips_asub_s_b,
+ MSA128BOpnd>;
+class ASUB_S_H_DESC : MSA_3R_DESC_BASE<"asub_s.h", int_mips_asub_s_h,
+ MSA128HOpnd>;
+class ASUB_S_W_DESC : MSA_3R_DESC_BASE<"asub_s.w", int_mips_asub_s_w,
+ MSA128WOpnd>;
+class ASUB_S_D_DESC : MSA_3R_DESC_BASE<"asub_s.d", int_mips_asub_s_d,
+ MSA128DOpnd>;
+
+class ASUB_U_B_DESC : MSA_3R_DESC_BASE<"asub_u.b", int_mips_asub_u_b,
+ MSA128BOpnd>;
+class ASUB_U_H_DESC : MSA_3R_DESC_BASE<"asub_u.h", int_mips_asub_u_h,
+ MSA128HOpnd>;
+class ASUB_U_W_DESC : MSA_3R_DESC_BASE<"asub_u.w", int_mips_asub_u_w,
+ MSA128WOpnd>;
+class ASUB_U_D_DESC : MSA_3R_DESC_BASE<"asub_u.d", int_mips_asub_u_d,
+ MSA128DOpnd>;
+
+class AVE_S_B_DESC : MSA_3R_DESC_BASE<"ave_s.b", int_mips_ave_s_b, MSA128BOpnd>,
+ IsCommutable;
+class AVE_S_H_DESC : MSA_3R_DESC_BASE<"ave_s.h", int_mips_ave_s_h, MSA128HOpnd>,
+ IsCommutable;
+class AVE_S_W_DESC : MSA_3R_DESC_BASE<"ave_s.w", int_mips_ave_s_w, MSA128WOpnd>,
+ IsCommutable;
+class AVE_S_D_DESC : MSA_3R_DESC_BASE<"ave_s.d", int_mips_ave_s_d, MSA128DOpnd>,
+ IsCommutable;
+
+class AVE_U_B_DESC : MSA_3R_DESC_BASE<"ave_u.b", int_mips_ave_u_b, MSA128BOpnd>,
+ IsCommutable;
+class AVE_U_H_DESC : MSA_3R_DESC_BASE<"ave_u.h", int_mips_ave_u_h, MSA128HOpnd>,
+ IsCommutable;
+class AVE_U_W_DESC : MSA_3R_DESC_BASE<"ave_u.w", int_mips_ave_u_w, MSA128WOpnd>,
+ IsCommutable;
+class AVE_U_D_DESC : MSA_3R_DESC_BASE<"ave_u.d", int_mips_ave_u_d, MSA128DOpnd>,
+ IsCommutable;
+
+class AVER_S_B_DESC : MSA_3R_DESC_BASE<"aver_s.b", int_mips_aver_s_b,
+ MSA128BOpnd>, IsCommutable;
+class AVER_S_H_DESC : MSA_3R_DESC_BASE<"aver_s.h", int_mips_aver_s_h,
+ MSA128HOpnd>, IsCommutable;
+class AVER_S_W_DESC : MSA_3R_DESC_BASE<"aver_s.w", int_mips_aver_s_w,
+ MSA128WOpnd>, IsCommutable;
+class AVER_S_D_DESC : MSA_3R_DESC_BASE<"aver_s.d", int_mips_aver_s_d,
+ MSA128DOpnd>, IsCommutable;
+
+class AVER_U_B_DESC : MSA_3R_DESC_BASE<"aver_u.b", int_mips_aver_u_b,
+ MSA128BOpnd>, IsCommutable;
+class AVER_U_H_DESC : MSA_3R_DESC_BASE<"aver_u.h", int_mips_aver_u_h,
+ MSA128HOpnd>, IsCommutable;
+class AVER_U_W_DESC : MSA_3R_DESC_BASE<"aver_u.w", int_mips_aver_u_w,
+ MSA128WOpnd>, IsCommutable;
+class AVER_U_D_DESC : MSA_3R_DESC_BASE<"aver_u.d", int_mips_aver_u_d,
+ MSA128DOpnd>, IsCommutable;
+
+class BCLR_B_DESC : MSA_3R_DESC_BASE<"bclr.b", vbclr_b, MSA128BOpnd>;
+class BCLR_H_DESC : MSA_3R_DESC_BASE<"bclr.h", vbclr_h, MSA128HOpnd>;
+class BCLR_W_DESC : MSA_3R_DESC_BASE<"bclr.w", vbclr_w, MSA128WOpnd>;
+class BCLR_D_DESC : MSA_3R_DESC_BASE<"bclr.d", vbclr_d, MSA128DOpnd>;
+
+class BCLRI_B_DESC : MSA_BIT_B_DESC_BASE<"bclri.b", and, vsplat_uimm_inv_pow2,
+ MSA128BOpnd>;
+class BCLRI_H_DESC : MSA_BIT_H_DESC_BASE<"bclri.h", and, vsplat_uimm_inv_pow2,
+ MSA128HOpnd>;
+class BCLRI_W_DESC : MSA_BIT_W_DESC_BASE<"bclri.w", and, vsplat_uimm_inv_pow2,
+ MSA128WOpnd>;
+class BCLRI_D_DESC : MSA_BIT_D_DESC_BASE<"bclri.d", and, vsplat_uimm_inv_pow2,
+ MSA128DOpnd>;
+
+class BINSL_B_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.b", int_mips_binsl_b,
+ MSA128BOpnd>;
+class BINSL_H_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.h", int_mips_binsl_h,
+ MSA128HOpnd>;
+class BINSL_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.w", int_mips_binsl_w,
+ MSA128WOpnd>;
+class BINSL_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsl.d", int_mips_binsl_d,
+ MSA128DOpnd>;
+
+class BINSLI_B_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.b", v16i8, MSA128BOpnd>;
+class BINSLI_H_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.h", v8i16, MSA128HOpnd>;
+class BINSLI_W_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.w", v4i32, MSA128WOpnd>;
+class BINSLI_D_DESC : MSA_BIT_BINSLI_DESC_BASE<"binsli.d", v2i64, MSA128DOpnd>;
+
+class BINSR_B_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.b", int_mips_binsr_b,
+ MSA128BOpnd>;
+class BINSR_H_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.h", int_mips_binsr_h,
+ MSA128HOpnd>;
+class BINSR_W_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.w", int_mips_binsr_w,
+ MSA128WOpnd>;
+class BINSR_D_DESC : MSA_3R_BINSX_DESC_BASE<"binsr.d", int_mips_binsr_d,
+ MSA128DOpnd>;
+
+class BINSRI_B_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.b", v16i8, MSA128BOpnd>;
+class BINSRI_H_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.h", v8i16, MSA128HOpnd>;
+class BINSRI_W_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.w", v4i32, MSA128WOpnd>;
+class BINSRI_D_DESC : MSA_BIT_BINSRI_DESC_BASE<"binsri.d", v2i64, MSA128DOpnd>;
+
+class BMNZ_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bmnz.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wt,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wd_in))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMNZI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bmnzi.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect vsplati8_uimm8:$u8,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wd_in))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMZ_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bmz.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wt,
+ MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BMZI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bmzi.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect vsplati8_uimm8:$u8,
+ MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BNEG_B_DESC : MSA_3R_DESC_BASE<"bneg.b", vbneg_b, MSA128BOpnd>;
+class BNEG_H_DESC : MSA_3R_DESC_BASE<"bneg.h", vbneg_h, MSA128HOpnd>;
+class BNEG_W_DESC : MSA_3R_DESC_BASE<"bneg.w", vbneg_w, MSA128WOpnd>;
+class BNEG_D_DESC : MSA_3R_DESC_BASE<"bneg.d", vbneg_d, MSA128DOpnd>;
+
+class BNEGI_B_DESC : MSA_BIT_B_DESC_BASE<"bnegi.b", xor, vsplat_uimm_pow2,
+ MSA128BOpnd>;
+class BNEGI_H_DESC : MSA_BIT_H_DESC_BASE<"bnegi.h", xor, vsplat_uimm_pow2,
+ MSA128HOpnd>;
+class BNEGI_W_DESC : MSA_BIT_W_DESC_BASE<"bnegi.w", xor, vsplat_uimm_pow2,
+ MSA128WOpnd>;
+class BNEGI_D_DESC : MSA_BIT_D_DESC_BASE<"bnegi.d", xor, vsplat_uimm_pow2,
+ MSA128DOpnd>;
+
+class BNZ_B_DESC : MSA_CBRANCH_DESC_BASE<"bnz.b", MSA128BOpnd>;
+class BNZ_H_DESC : MSA_CBRANCH_DESC_BASE<"bnz.h", MSA128HOpnd>;
+class BNZ_W_DESC : MSA_CBRANCH_DESC_BASE<"bnz.w", MSA128WOpnd>;
+class BNZ_D_DESC : MSA_CBRANCH_DESC_BASE<"bnz.d", MSA128DOpnd>;
+
+class BNZ_V_DESC : MSA_CBRANCH_DESC_BASE<"bnz.v", MSA128BOpnd>;
+
+class BSEL_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt);
+ string AsmString = "bsel.v\t$wd, $ws, $wt";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd,
+ (vselect MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BSELI_B_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$wd_in, MSA128BOpnd:$ws,
+ vsplat_uimm8:$u8);
+ string AsmString = "bseli.b\t$wd, $ws, $u8";
+ list<dag> Pattern = [(set MSA128BOpnd:$wd, (vselect MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws,
+ vsplati8_uimm8:$u8))];
+ InstrItinClass Itinerary = NoItinerary;
+ string Constraints = "$wd = $wd_in";
+}
+
+class BSET_B_DESC : MSA_3R_DESC_BASE<"bset.b", vbset_b, MSA128BOpnd>;
+class BSET_H_DESC : MSA_3R_DESC_BASE<"bset.h", vbset_h, MSA128HOpnd>;
+class BSET_W_DESC : MSA_3R_DESC_BASE<"bset.w", vbset_w, MSA128WOpnd>;
+class BSET_D_DESC : MSA_3R_DESC_BASE<"bset.d", vbset_d, MSA128DOpnd>;
+
+class BSETI_B_DESC : MSA_BIT_B_DESC_BASE<"bseti.b", or, vsplat_uimm_pow2,
+ MSA128BOpnd>;
+class BSETI_H_DESC : MSA_BIT_H_DESC_BASE<"bseti.h", or, vsplat_uimm_pow2,
+ MSA128HOpnd>;
+class BSETI_W_DESC : MSA_BIT_W_DESC_BASE<"bseti.w", or, vsplat_uimm_pow2,
+ MSA128WOpnd>;
+class BSETI_D_DESC : MSA_BIT_D_DESC_BASE<"bseti.d", or, vsplat_uimm_pow2,
+ MSA128DOpnd>;
+
+class BZ_B_DESC : MSA_CBRANCH_DESC_BASE<"bz.b", MSA128BOpnd>;
+class BZ_H_DESC : MSA_CBRANCH_DESC_BASE<"bz.h", MSA128HOpnd>;
+class BZ_W_DESC : MSA_CBRANCH_DESC_BASE<"bz.w", MSA128WOpnd>;
+class BZ_D_DESC : MSA_CBRANCH_DESC_BASE<"bz.d", MSA128DOpnd>;
+
+class BZ_V_DESC : MSA_CBRANCH_DESC_BASE<"bz.v", MSA128BOpnd>;
+
+class CEQ_B_DESC : MSA_3R_DESC_BASE<"ceq.b", vseteq_v16i8, MSA128BOpnd>,
+ IsCommutable;
+class CEQ_H_DESC : MSA_3R_DESC_BASE<"ceq.h", vseteq_v8i16, MSA128HOpnd>,
+ IsCommutable;
+class CEQ_W_DESC : MSA_3R_DESC_BASE<"ceq.w", vseteq_v4i32, MSA128WOpnd>,
+ IsCommutable;
+class CEQ_D_DESC : MSA_3R_DESC_BASE<"ceq.d", vseteq_v2i64, MSA128DOpnd>,
+ IsCommutable;
+
+class CEQI_B_DESC : MSA_I5_DESC_BASE<"ceqi.b", vseteq_v16i8, vsplati8_simm5,
+ MSA128BOpnd>;
+class CEQI_H_DESC : MSA_I5_DESC_BASE<"ceqi.h", vseteq_v8i16, vsplati16_simm5,
+ MSA128HOpnd>;
+class CEQI_W_DESC : MSA_I5_DESC_BASE<"ceqi.w", vseteq_v4i32, vsplati32_simm5,
+ MSA128WOpnd>;
+class CEQI_D_DESC : MSA_I5_DESC_BASE<"ceqi.d", vseteq_v2i64, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class CFCMSA_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins MSA128CROpnd:$cs);
+ string AsmString = "cfcmsa\t$rd, $cs";
+ InstrItinClass Itinerary = NoItinerary;
+ bit hasSideEffects = 1;
+}
+
+class CLE_S_B_DESC : MSA_3R_DESC_BASE<"cle_s.b", vsetle_v16i8, MSA128BOpnd>;
+class CLE_S_H_DESC : MSA_3R_DESC_BASE<"cle_s.h", vsetle_v8i16, MSA128HOpnd>;
+class CLE_S_W_DESC : MSA_3R_DESC_BASE<"cle_s.w", vsetle_v4i32, MSA128WOpnd>;
+class CLE_S_D_DESC : MSA_3R_DESC_BASE<"cle_s.d", vsetle_v2i64, MSA128DOpnd>;
+
+class CLE_U_B_DESC : MSA_3R_DESC_BASE<"cle_u.b", vsetule_v16i8, MSA128BOpnd>;
+class CLE_U_H_DESC : MSA_3R_DESC_BASE<"cle_u.h", vsetule_v8i16, MSA128HOpnd>;
+class CLE_U_W_DESC : MSA_3R_DESC_BASE<"cle_u.w", vsetule_v4i32, MSA128WOpnd>;
+class CLE_U_D_DESC : MSA_3R_DESC_BASE<"cle_u.d", vsetule_v2i64, MSA128DOpnd>;
+
+class CLEI_S_B_DESC : MSA_I5_DESC_BASE<"clei_s.b", vsetle_v16i8,
+ vsplati8_simm5, MSA128BOpnd>;
+class CLEI_S_H_DESC : MSA_I5_DESC_BASE<"clei_s.h", vsetle_v8i16,
+ vsplati16_simm5, MSA128HOpnd>;
+class CLEI_S_W_DESC : MSA_I5_DESC_BASE<"clei_s.w", vsetle_v4i32,
+ vsplati32_simm5, MSA128WOpnd>;
+class CLEI_S_D_DESC : MSA_I5_DESC_BASE<"clei_s.d", vsetle_v2i64,
+ vsplati64_simm5, MSA128DOpnd>;
+
+class CLEI_U_B_DESC : MSA_I5_DESC_BASE<"clei_u.b", vsetule_v16i8,
+ vsplati8_uimm5, MSA128BOpnd>;
+class CLEI_U_H_DESC : MSA_I5_DESC_BASE<"clei_u.h", vsetule_v8i16,
+ vsplati16_uimm5, MSA128HOpnd>;
+class CLEI_U_W_DESC : MSA_I5_DESC_BASE<"clei_u.w", vsetule_v4i32,
+ vsplati32_uimm5, MSA128WOpnd>;
+class CLEI_U_D_DESC : MSA_I5_DESC_BASE<"clei_u.d", vsetule_v2i64,
+ vsplati64_uimm5, MSA128DOpnd>;
+
+class CLT_S_B_DESC : MSA_3R_DESC_BASE<"clt_s.b", vsetlt_v16i8, MSA128BOpnd>;
+class CLT_S_H_DESC : MSA_3R_DESC_BASE<"clt_s.h", vsetlt_v8i16, MSA128HOpnd>;
+class CLT_S_W_DESC : MSA_3R_DESC_BASE<"clt_s.w", vsetlt_v4i32, MSA128WOpnd>;
+class CLT_S_D_DESC : MSA_3R_DESC_BASE<"clt_s.d", vsetlt_v2i64, MSA128DOpnd>;
+
+class CLT_U_B_DESC : MSA_3R_DESC_BASE<"clt_u.b", vsetult_v16i8, MSA128BOpnd>;
+class CLT_U_H_DESC : MSA_3R_DESC_BASE<"clt_u.h", vsetult_v8i16, MSA128HOpnd>;
+class CLT_U_W_DESC : MSA_3R_DESC_BASE<"clt_u.w", vsetult_v4i32, MSA128WOpnd>;
+class CLT_U_D_DESC : MSA_3R_DESC_BASE<"clt_u.d", vsetult_v2i64, MSA128DOpnd>;
+
+class CLTI_S_B_DESC : MSA_I5_DESC_BASE<"clti_s.b", vsetlt_v16i8,
+ vsplati8_simm5, MSA128BOpnd>;
+class CLTI_S_H_DESC : MSA_I5_DESC_BASE<"clti_s.h", vsetlt_v8i16,
+ vsplati16_simm5, MSA128HOpnd>;
+class CLTI_S_W_DESC : MSA_I5_DESC_BASE<"clti_s.w", vsetlt_v4i32,
+ vsplati32_simm5, MSA128WOpnd>;
+class CLTI_S_D_DESC : MSA_I5_DESC_BASE<"clti_s.d", vsetlt_v2i64,
+ vsplati64_simm5, MSA128DOpnd>;
+
+class CLTI_U_B_DESC : MSA_I5_DESC_BASE<"clti_u.b", vsetult_v16i8,
+ vsplati8_uimm5, MSA128BOpnd>;
+class CLTI_U_H_DESC : MSA_I5_DESC_BASE<"clti_u.h", vsetult_v8i16,
+ vsplati16_uimm5, MSA128HOpnd>;
+class CLTI_U_W_DESC : MSA_I5_DESC_BASE<"clti_u.w", vsetult_v4i32,
+ vsplati32_uimm5, MSA128WOpnd>;
+class CLTI_U_D_DESC : MSA_I5_DESC_BASE<"clti_u.d", vsetult_v2i64,
+ vsplati64_uimm5, MSA128DOpnd>;
+
+class COPY_S_B_DESC : MSA_COPY_DESC_BASE<"copy_s.b", vextract_sext_i8, v16i8,
+ GPR32Opnd, MSA128BOpnd>;
+class COPY_S_H_DESC : MSA_COPY_DESC_BASE<"copy_s.h", vextract_sext_i16, v8i16,
+ GPR32Opnd, MSA128HOpnd>;
+class COPY_S_W_DESC : MSA_COPY_DESC_BASE<"copy_s.w", vextract_sext_i32, v4i32,
+ GPR32Opnd, MSA128WOpnd>;
+
+class COPY_U_B_DESC : MSA_COPY_DESC_BASE<"copy_u.b", vextract_zext_i8, v16i8,
+ GPR32Opnd, MSA128BOpnd>;
+class COPY_U_H_DESC : MSA_COPY_DESC_BASE<"copy_u.h", vextract_zext_i16, v8i16,
+ GPR32Opnd, MSA128HOpnd>;
+class COPY_U_W_DESC : MSA_COPY_DESC_BASE<"copy_u.w", vextract_zext_i32, v4i32,
+ GPR32Opnd, MSA128WOpnd>;
+
+class COPY_FW_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v4f32, FGR32,
+ MSA128W>;
+class COPY_FD_PSEUDO_DESC : MSA_COPY_PSEUDO_BASE<vector_extract, v2f64, FGR64,
+ MSA128D>;
+
+class CTCMSA_DESC {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins MSA128CROpnd:$cd, GPR32Opnd:$rs);
+ string AsmString = "ctcmsa\t$cd, $rs";
+ InstrItinClass Itinerary = NoItinerary;
+ bit hasSideEffects = 1;
+}
+
+class DIV_S_B_DESC : MSA_3R_DESC_BASE<"div_s.b", sdiv, MSA128BOpnd>;
+class DIV_S_H_DESC : MSA_3R_DESC_BASE<"div_s.h", sdiv, MSA128HOpnd>;
+class DIV_S_W_DESC : MSA_3R_DESC_BASE<"div_s.w", sdiv, MSA128WOpnd>;
+class DIV_S_D_DESC : MSA_3R_DESC_BASE<"div_s.d", sdiv, MSA128DOpnd>;
+
+class DIV_U_B_DESC : MSA_3R_DESC_BASE<"div_u.b", udiv, MSA128BOpnd>;
+class DIV_U_H_DESC : MSA_3R_DESC_BASE<"div_u.h", udiv, MSA128HOpnd>;
+class DIV_U_W_DESC : MSA_3R_DESC_BASE<"div_u.w", udiv, MSA128WOpnd>;
+class DIV_U_D_DESC : MSA_3R_DESC_BASE<"div_u.d", udiv, MSA128DOpnd>;
+
+class DOTP_S_H_DESC : MSA_3R_DESC_BASE<"dotp_s.h", int_mips_dotp_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>,
+ IsCommutable;
+class DOTP_S_W_DESC : MSA_3R_DESC_BASE<"dotp_s.w", int_mips_dotp_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>,
+ IsCommutable;
+class DOTP_S_D_DESC : MSA_3R_DESC_BASE<"dotp_s.d", int_mips_dotp_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>,
+ IsCommutable;
+
+class DOTP_U_H_DESC : MSA_3R_DESC_BASE<"dotp_u.h", int_mips_dotp_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>,
+ IsCommutable;
+class DOTP_U_W_DESC : MSA_3R_DESC_BASE<"dotp_u.w", int_mips_dotp_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>,
+ IsCommutable;
+class DOTP_U_D_DESC : MSA_3R_DESC_BASE<"dotp_u.d", int_mips_dotp_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>,
+ IsCommutable;
+
+class DPADD_S_H_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.h", int_mips_dpadd_s_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>, IsCommutable;
+class DPADD_S_W_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.w", int_mips_dpadd_s_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>, IsCommutable;
+class DPADD_S_D_DESC : MSA_3R_4R_DESC_BASE<"dpadd_s.d", int_mips_dpadd_s_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>, IsCommutable;
+
+class DPADD_U_H_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.h", int_mips_dpadd_u_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>, IsCommutable;
+class DPADD_U_W_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.w", int_mips_dpadd_u_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>, IsCommutable;
+class DPADD_U_D_DESC : MSA_3R_4R_DESC_BASE<"dpadd_u.d", int_mips_dpadd_u_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>, IsCommutable;
+
+class DPSUB_S_H_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.h", int_mips_dpsub_s_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>;
+class DPSUB_S_W_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.w", int_mips_dpsub_s_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>;
+class DPSUB_S_D_DESC : MSA_3R_4R_DESC_BASE<"dpsub_s.d", int_mips_dpsub_s_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>;
+
+class DPSUB_U_H_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.h", int_mips_dpsub_u_h,
+ MSA128HOpnd, MSA128BOpnd,
+ MSA128BOpnd>;
+class DPSUB_U_W_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.w", int_mips_dpsub_u_w,
+ MSA128WOpnd, MSA128HOpnd,
+ MSA128HOpnd>;
+class DPSUB_U_D_DESC : MSA_3R_4R_DESC_BASE<"dpsub_u.d", int_mips_dpsub_u_d,
+ MSA128DOpnd, MSA128WOpnd,
+ MSA128WOpnd>;
+
+class FADD_W_DESC : MSA_3RF_DESC_BASE<"fadd.w", fadd, MSA128WOpnd>,
+ IsCommutable;
+class FADD_D_DESC : MSA_3RF_DESC_BASE<"fadd.d", fadd, MSA128DOpnd>,
+ IsCommutable;
+
+class FCAF_W_DESC : MSA_3RF_DESC_BASE<"fcaf.w", int_mips_fcaf_w, MSA128WOpnd>,
+ IsCommutable;
+class FCAF_D_DESC : MSA_3RF_DESC_BASE<"fcaf.d", int_mips_fcaf_d, MSA128DOpnd>,
+ IsCommutable;
+
+class FCEQ_W_DESC : MSA_3RF_DESC_BASE<"fceq.w", vfsetoeq_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCEQ_D_DESC : MSA_3RF_DESC_BASE<"fceq.d", vfsetoeq_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCLASS_W_DESC : MSA_2RF_DESC_BASE<"fclass.w", int_mips_fclass_w,
+ MSA128WOpnd>;
+class FCLASS_D_DESC : MSA_2RF_DESC_BASE<"fclass.d", int_mips_fclass_d,
+ MSA128DOpnd>;
+
+class FCLE_W_DESC : MSA_3RF_DESC_BASE<"fcle.w", vfsetole_v4f32, MSA128WOpnd>;
+class FCLE_D_DESC : MSA_3RF_DESC_BASE<"fcle.d", vfsetole_v2f64, MSA128DOpnd>;
+
+class FCLT_W_DESC : MSA_3RF_DESC_BASE<"fclt.w", vfsetolt_v4f32, MSA128WOpnd>;
+class FCLT_D_DESC : MSA_3RF_DESC_BASE<"fclt.d", vfsetolt_v2f64, MSA128DOpnd>;
+
+class FCNE_W_DESC : MSA_3RF_DESC_BASE<"fcne.w", vfsetone_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCNE_D_DESC : MSA_3RF_DESC_BASE<"fcne.d", vfsetone_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCOR_W_DESC : MSA_3RF_DESC_BASE<"fcor.w", vfsetord_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCOR_D_DESC : MSA_3RF_DESC_BASE<"fcor.d", vfsetord_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUEQ_W_DESC : MSA_3RF_DESC_BASE<"fcueq.w", vfsetueq_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUEQ_D_DESC : MSA_3RF_DESC_BASE<"fcueq.d", vfsetueq_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCULE_W_DESC : MSA_3RF_DESC_BASE<"fcule.w", vfsetule_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCULE_D_DESC : MSA_3RF_DESC_BASE<"fcule.d", vfsetule_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCULT_W_DESC : MSA_3RF_DESC_BASE<"fcult.w", vfsetult_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCULT_D_DESC : MSA_3RF_DESC_BASE<"fcult.d", vfsetult_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUN_W_DESC : MSA_3RF_DESC_BASE<"fcun.w", vfsetun_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUN_D_DESC : MSA_3RF_DESC_BASE<"fcun.d", vfsetun_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FCUNE_W_DESC : MSA_3RF_DESC_BASE<"fcune.w", vfsetune_v4f32, MSA128WOpnd>,
+ IsCommutable;
+class FCUNE_D_DESC : MSA_3RF_DESC_BASE<"fcune.d", vfsetune_v2f64, MSA128DOpnd>,
+ IsCommutable;
+
+class FDIV_W_DESC : MSA_3RF_DESC_BASE<"fdiv.w", fdiv, MSA128WOpnd>;
+class FDIV_D_DESC : MSA_3RF_DESC_BASE<"fdiv.d", fdiv, MSA128DOpnd>;
+
+class FEXDO_H_DESC : MSA_3RF_DESC_BASE<"fexdo.h", int_mips_fexdo_h,
+ MSA128HOpnd, MSA128WOpnd, MSA128WOpnd>;
+class FEXDO_W_DESC : MSA_3RF_DESC_BASE<"fexdo.w", int_mips_fexdo_w,
+ MSA128WOpnd, MSA128DOpnd, MSA128DOpnd>;
+
+// The fexp2.df instruction multiplies the first operand by 2 to the power of
+// the second operand. We therefore need a pseudo-insn in order to invent the
+// 1.0 when we only need to match ISD::FEXP2.
+class FEXP2_W_DESC : MSA_3RF_DESC_BASE<"fexp2.w", mul_fexp2, MSA128WOpnd>;
+class FEXP2_D_DESC : MSA_3RF_DESC_BASE<"fexp2.d", mul_fexp2, MSA128DOpnd>;
+let usesCustomInserter = 1 in {
+ class FEXP2_W_1_PSEUDO_DESC :
+ MipsPseudo<(outs MSA128W:$wd), (ins MSA128W:$ws),
+ [(set MSA128W:$wd, (fexp2 MSA128W:$ws))]>;
+ class FEXP2_D_1_PSEUDO_DESC :
+ MipsPseudo<(outs MSA128D:$wd), (ins MSA128D:$ws),
+ [(set MSA128D:$wd, (fexp2 MSA128D:$ws))]>;
+}
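Because of usesCustomInserter, the FEXP2_*_1 pseudos above are expanded after instruction selection instead of being matched by a pattern. As a hedged illustration of the idea in the comment (a sketch under assumed names, not the code from this import), a custom inserter could materialise the missing 1.0 operand and then emit the real fexp2:

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

// Hypothetical expansion of FEXP2_W_1_PSEUDO: fexp2.w computes ws * 2^wt, so
// splat an integer 1, convert it to 1.0f, and use that as the first operand.
// Mips::LDI_W, Mips::FFINT_U_W, Mips::FEXP2_W and Mips::MSA128WRegClass are
// assumed to be the TableGen-generated names for the defs in this file.
static MachineBasicBlock *expandFExp2W1(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        const TargetInstrInfo *TII) {
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
  unsigned Ws1 = RegInfo.createVirtualRegister(RC); // integer splat of 1
  unsigned Ws2 = RegInfo.createVirtualRegister(RC); // 1.0f in every element
  DebugLoc DL = MI->getDebugLoc();

  BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1);
  BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1);

  // wd = 1.0 * 2^ws, i.e. plain exp2 of the pseudo's single input.
  BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI->getOperand(0).getReg())
      .addReg(Ws2)
      .addReg(MI->getOperand(1).getReg());

  MI->eraseFromParent(); // The pseudo has been replaced.
  return BB;
}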
+
+class FEXUPL_W_DESC : MSA_2RF_DESC_BASE<"fexupl.w", int_mips_fexupl_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FEXUPL_D_DESC : MSA_2RF_DESC_BASE<"fexupl.d", int_mips_fexupl_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FEXUPR_W_DESC : MSA_2RF_DESC_BASE<"fexupr.w", int_mips_fexupr_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FEXUPR_D_DESC : MSA_2RF_DESC_BASE<"fexupr.d", int_mips_fexupr_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FFINT_S_W_DESC : MSA_2RF_DESC_BASE<"ffint_s.w", sint_to_fp, MSA128WOpnd>;
+class FFINT_S_D_DESC : MSA_2RF_DESC_BASE<"ffint_s.d", sint_to_fp, MSA128DOpnd>;
+
+class FFINT_U_W_DESC : MSA_2RF_DESC_BASE<"ffint_u.w", uint_to_fp, MSA128WOpnd>;
+class FFINT_U_D_DESC : MSA_2RF_DESC_BASE<"ffint_u.d", uint_to_fp, MSA128DOpnd>;
+
+class FFQL_W_DESC : MSA_2RF_DESC_BASE<"ffql.w", int_mips_ffql_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FFQL_D_DESC : MSA_2RF_DESC_BASE<"ffql.d", int_mips_ffql_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FFQR_W_DESC : MSA_2RF_DESC_BASE<"ffqr.w", int_mips_ffqr_w,
+ MSA128WOpnd, MSA128HOpnd>;
+class FFQR_D_DESC : MSA_2RF_DESC_BASE<"ffqr.d", int_mips_ffqr_d,
+ MSA128DOpnd, MSA128WOpnd>;
+
+class FILL_B_DESC : MSA_2R_FILL_DESC_BASE<"fill.b", v16i8, vsplati8,
+ MSA128BOpnd, GPR32Opnd>;
+class FILL_H_DESC : MSA_2R_FILL_DESC_BASE<"fill.h", v8i16, vsplati16,
+ MSA128HOpnd, GPR32Opnd>;
+class FILL_W_DESC : MSA_2R_FILL_DESC_BASE<"fill.w", v4i32, vsplati32,
+ MSA128WOpnd, GPR32Opnd>;
+
+class FILL_FW_PSEUDO_DESC : MSA_2R_FILL_PSEUDO_BASE<v4f32, vsplatf32, MSA128W,
+ FGR32>;
+class FILL_FD_PSEUDO_DESC : MSA_2R_FILL_PSEUDO_BASE<v2f64, vsplatf64, MSA128D,
+ FGR64>;
+
+class FLOG2_W_DESC : MSA_2RF_DESC_BASE<"flog2.w", flog2, MSA128WOpnd>;
+class FLOG2_D_DESC : MSA_2RF_DESC_BASE<"flog2.d", flog2, MSA128DOpnd>;
+
+class FMADD_W_DESC : MSA_3RF_4RF_DESC_BASE<"fmadd.w", fma, MSA128WOpnd>;
+class FMADD_D_DESC : MSA_3RF_4RF_DESC_BASE<"fmadd.d", fma, MSA128DOpnd>;
+
+class FMAX_W_DESC : MSA_3RF_DESC_BASE<"fmax.w", int_mips_fmax_w, MSA128WOpnd>;
+class FMAX_D_DESC : MSA_3RF_DESC_BASE<"fmax.d", int_mips_fmax_d, MSA128DOpnd>;
+
+class FMAX_A_W_DESC : MSA_3RF_DESC_BASE<"fmax_a.w", int_mips_fmax_a_w,
+ MSA128WOpnd>;
+class FMAX_A_D_DESC : MSA_3RF_DESC_BASE<"fmax_a.d", int_mips_fmax_a_d,
+ MSA128DOpnd>;
+
+class FMIN_W_DESC : MSA_3RF_DESC_BASE<"fmin.w", int_mips_fmin_w, MSA128WOpnd>;
+class FMIN_D_DESC : MSA_3RF_DESC_BASE<"fmin.d", int_mips_fmin_d, MSA128DOpnd>;
+
+class FMIN_A_W_DESC : MSA_3RF_DESC_BASE<"fmin_a.w", int_mips_fmin_a_w,
+ MSA128WOpnd>;
+class FMIN_A_D_DESC : MSA_3RF_DESC_BASE<"fmin_a.d", int_mips_fmin_a_d,
+ MSA128DOpnd>;
+
+class FMSUB_W_DESC : MSA_3RF_4RF_DESC_BASE<"fmsub.w", fms, MSA128WOpnd>;
+class FMSUB_D_DESC : MSA_3RF_4RF_DESC_BASE<"fmsub.d", fms, MSA128DOpnd>;
+
+class FMUL_W_DESC : MSA_3RF_DESC_BASE<"fmul.w", fmul, MSA128WOpnd>;
+class FMUL_D_DESC : MSA_3RF_DESC_BASE<"fmul.d", fmul, MSA128DOpnd>;
+
+class FRINT_W_DESC : MSA_2RF_DESC_BASE<"frint.w", frint, MSA128WOpnd>;
+class FRINT_D_DESC : MSA_2RF_DESC_BASE<"frint.d", frint, MSA128DOpnd>;
+
+class FRCP_W_DESC : MSA_2RF_DESC_BASE<"frcp.w", int_mips_frcp_w, MSA128WOpnd>;
+class FRCP_D_DESC : MSA_2RF_DESC_BASE<"frcp.d", int_mips_frcp_d, MSA128DOpnd>;
+
+class FRSQRT_W_DESC : MSA_2RF_DESC_BASE<"frsqrt.w", int_mips_frsqrt_w,
+ MSA128WOpnd>;
+class FRSQRT_D_DESC : MSA_2RF_DESC_BASE<"frsqrt.d", int_mips_frsqrt_d,
+ MSA128DOpnd>;
+
+class FSAF_W_DESC : MSA_3RF_DESC_BASE<"fsaf.w", int_mips_fsaf_w, MSA128WOpnd>;
+class FSAF_D_DESC : MSA_3RF_DESC_BASE<"fsaf.d", int_mips_fsaf_d, MSA128DOpnd>;
+
+class FSEQ_W_DESC : MSA_3RF_DESC_BASE<"fseq.w", int_mips_fseq_w, MSA128WOpnd>;
+class FSEQ_D_DESC : MSA_3RF_DESC_BASE<"fseq.d", int_mips_fseq_d, MSA128DOpnd>;
+
+class FSLE_W_DESC : MSA_3RF_DESC_BASE<"fsle.w", int_mips_fsle_w, MSA128WOpnd>;
+class FSLE_D_DESC : MSA_3RF_DESC_BASE<"fsle.d", int_mips_fsle_d, MSA128DOpnd>;
+
+class FSLT_W_DESC : MSA_3RF_DESC_BASE<"fslt.w", int_mips_fslt_w, MSA128WOpnd>;
+class FSLT_D_DESC : MSA_3RF_DESC_BASE<"fslt.d", int_mips_fslt_d, MSA128DOpnd>;
+
+class FSNE_W_DESC : MSA_3RF_DESC_BASE<"fsne.w", int_mips_fsne_w, MSA128WOpnd>;
+class FSNE_D_DESC : MSA_3RF_DESC_BASE<"fsne.d", int_mips_fsne_d, MSA128DOpnd>;
+
+class FSOR_W_DESC : MSA_3RF_DESC_BASE<"fsor.w", int_mips_fsor_w, MSA128WOpnd>;
+class FSOR_D_DESC : MSA_3RF_DESC_BASE<"fsor.d", int_mips_fsor_d, MSA128DOpnd>;
+
+class FSQRT_W_DESC : MSA_2RF_DESC_BASE<"fsqrt.w", fsqrt, MSA128WOpnd>;
+class FSQRT_D_DESC : MSA_2RF_DESC_BASE<"fsqrt.d", fsqrt, MSA128DOpnd>;
+
+class FSUB_W_DESC : MSA_3RF_DESC_BASE<"fsub.w", fsub, MSA128WOpnd>;
+class FSUB_D_DESC : MSA_3RF_DESC_BASE<"fsub.d", fsub, MSA128DOpnd>;
+
+class FSUEQ_W_DESC : MSA_3RF_DESC_BASE<"fsueq.w", int_mips_fsueq_w,
+ MSA128WOpnd>;
+class FSUEQ_D_DESC : MSA_3RF_DESC_BASE<"fsueq.d", int_mips_fsueq_d,
+ MSA128DOpnd>;
+
+class FSULE_W_DESC : MSA_3RF_DESC_BASE<"fsule.w", int_mips_fsule_w,
+ MSA128WOpnd>;
+class FSULE_D_DESC : MSA_3RF_DESC_BASE<"fsule.d", int_mips_fsule_d,
+ MSA128DOpnd>;
+
+class FSULT_W_DESC : MSA_3RF_DESC_BASE<"fsult.w", int_mips_fsult_w,
+ MSA128WOpnd>;
+class FSULT_D_DESC : MSA_3RF_DESC_BASE<"fsult.d", int_mips_fsult_d,
+ MSA128DOpnd>;
+
+class FSUN_W_DESC : MSA_3RF_DESC_BASE<"fsun.w", int_mips_fsun_w,
+ MSA128WOpnd>;
+class FSUN_D_DESC : MSA_3RF_DESC_BASE<"fsun.d", int_mips_fsun_d,
+ MSA128DOpnd>;
+
+class FSUNE_W_DESC : MSA_3RF_DESC_BASE<"fsune.w", int_mips_fsune_w,
+ MSA128WOpnd>;
+class FSUNE_D_DESC : MSA_3RF_DESC_BASE<"fsune.d", int_mips_fsune_d,
+ MSA128DOpnd>;
+
+class FTINT_S_W_DESC : MSA_2RF_DESC_BASE<"ftint_s.w", int_mips_ftint_s_w,
+ MSA128WOpnd>;
+class FTINT_S_D_DESC : MSA_2RF_DESC_BASE<"ftint_s.d", int_mips_ftint_s_d,
+ MSA128DOpnd>;
+
+class FTINT_U_W_DESC : MSA_2RF_DESC_BASE<"ftint_u.w", int_mips_ftint_u_w,
+ MSA128WOpnd>;
+class FTINT_U_D_DESC : MSA_2RF_DESC_BASE<"ftint_u.d", int_mips_ftint_u_d,
+ MSA128DOpnd>;
+
+class FTQ_H_DESC : MSA_3RF_DESC_BASE<"ftq.h", int_mips_ftq_h,
+ MSA128HOpnd, MSA128WOpnd, MSA128WOpnd>;
+class FTQ_W_DESC : MSA_3RF_DESC_BASE<"ftq.w", int_mips_ftq_w,
+ MSA128WOpnd, MSA128DOpnd, MSA128DOpnd>;
+
+class FTRUNC_S_W_DESC : MSA_2RF_DESC_BASE<"ftrunc_s.w", fp_to_sint,
+ MSA128WOpnd>;
+class FTRUNC_S_D_DESC : MSA_2RF_DESC_BASE<"ftrunc_s.d", fp_to_sint,
+ MSA128DOpnd>;
+
+class FTRUNC_U_W_DESC : MSA_2RF_DESC_BASE<"ftrunc_u.w", fp_to_uint,
+ MSA128WOpnd>;
+class FTRUNC_U_D_DESC : MSA_2RF_DESC_BASE<"ftrunc_u.d", fp_to_uint,
+ MSA128DOpnd>;
+
+class HADD_S_H_DESC : MSA_3R_DESC_BASE<"hadd_s.h", int_mips_hadd_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HADD_S_W_DESC : MSA_3R_DESC_BASE<"hadd_s.w", int_mips_hadd_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HADD_S_D_DESC : MSA_3R_DESC_BASE<"hadd_s.d", int_mips_hadd_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HADD_U_H_DESC : MSA_3R_DESC_BASE<"hadd_u.h", int_mips_hadd_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HADD_U_W_DESC : MSA_3R_DESC_BASE<"hadd_u.w", int_mips_hadd_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HADD_U_D_DESC : MSA_3R_DESC_BASE<"hadd_u.d", int_mips_hadd_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HSUB_S_H_DESC : MSA_3R_DESC_BASE<"hsub_s.h", int_mips_hsub_s_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HSUB_S_W_DESC : MSA_3R_DESC_BASE<"hsub_s.w", int_mips_hsub_s_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HSUB_S_D_DESC : MSA_3R_DESC_BASE<"hsub_s.d", int_mips_hsub_s_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class HSUB_U_H_DESC : MSA_3R_DESC_BASE<"hsub_u.h", int_mips_hsub_u_h,
+ MSA128HOpnd, MSA128BOpnd, MSA128BOpnd>;
+class HSUB_U_W_DESC : MSA_3R_DESC_BASE<"hsub_u.w", int_mips_hsub_u_w,
+ MSA128WOpnd, MSA128HOpnd, MSA128HOpnd>;
+class HSUB_U_D_DESC : MSA_3R_DESC_BASE<"hsub_u.d", int_mips_hsub_u_d,
+ MSA128DOpnd, MSA128WOpnd, MSA128WOpnd>;
+
+class ILVEV_B_DESC : MSA_3R_DESC_BASE<"ilvev.b", MipsILVEV, MSA128BOpnd>;
+class ILVEV_H_DESC : MSA_3R_DESC_BASE<"ilvev.h", MipsILVEV, MSA128HOpnd>;
+class ILVEV_W_DESC : MSA_3R_DESC_BASE<"ilvev.w", MipsILVEV, MSA128WOpnd>;
+class ILVEV_D_DESC : MSA_3R_DESC_BASE<"ilvev.d", MipsILVEV, MSA128DOpnd>;
+
+class ILVL_B_DESC : MSA_3R_DESC_BASE<"ilvl.b", MipsILVL, MSA128BOpnd>;
+class ILVL_H_DESC : MSA_3R_DESC_BASE<"ilvl.h", MipsILVL, MSA128HOpnd>;
+class ILVL_W_DESC : MSA_3R_DESC_BASE<"ilvl.w", MipsILVL, MSA128WOpnd>;
+class ILVL_D_DESC : MSA_3R_DESC_BASE<"ilvl.d", MipsILVL, MSA128DOpnd>;
+
+class ILVOD_B_DESC : MSA_3R_DESC_BASE<"ilvod.b", MipsILVOD, MSA128BOpnd>;
+class ILVOD_H_DESC : MSA_3R_DESC_BASE<"ilvod.h", MipsILVOD, MSA128HOpnd>;
+class ILVOD_W_DESC : MSA_3R_DESC_BASE<"ilvod.w", MipsILVOD, MSA128WOpnd>;
+class ILVOD_D_DESC : MSA_3R_DESC_BASE<"ilvod.d", MipsILVOD, MSA128DOpnd>;
+
+class ILVR_B_DESC : MSA_3R_DESC_BASE<"ilvr.b", MipsILVR, MSA128BOpnd>;
+class ILVR_H_DESC : MSA_3R_DESC_BASE<"ilvr.h", MipsILVR, MSA128HOpnd>;
+class ILVR_W_DESC : MSA_3R_DESC_BASE<"ilvr.w", MipsILVR, MSA128WOpnd>;
+class ILVR_D_DESC : MSA_3R_DESC_BASE<"ilvr.d", MipsILVR, MSA128DOpnd>;
+
+class INSERT_B_DESC : MSA_INSERT_DESC_BASE<"insert.b", vinsert_v16i8,
+ MSA128BOpnd, GPR32Opnd>;
+class INSERT_H_DESC : MSA_INSERT_DESC_BASE<"insert.h", vinsert_v8i16,
+ MSA128HOpnd, GPR32Opnd>;
+class INSERT_W_DESC : MSA_INSERT_DESC_BASE<"insert.w", vinsert_v4i32,
+ MSA128WOpnd, GPR32Opnd>;
+
+class INSERT_FW_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v4f32,
+ MSA128WOpnd, FGR32Opnd>;
+class INSERT_FD_PSEUDO_DESC : MSA_INSERT_PSEUDO_BASE<vector_insert, v2f64,
+ MSA128DOpnd, FGR64Opnd>;
+
+class INSVE_B_DESC : MSA_INSVE_DESC_BASE<"insve.b", int_mips_insve_b,
+ MSA128BOpnd>;
+class INSVE_H_DESC : MSA_INSVE_DESC_BASE<"insve.h", int_mips_insve_h,
+ MSA128HOpnd>;
+class INSVE_W_DESC : MSA_INSVE_DESC_BASE<"insve.w", int_mips_insve_w,
+ MSA128WOpnd>;
+class INSVE_D_DESC : MSA_INSVE_DESC_BASE<"insve.d", int_mips_insve_d,
+ MSA128DOpnd>;
+
+class LD_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType TyNode, RegisterOperand ROWD,
+ Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs ROWD:$wd);
+ dag InOperandList = (ins MemOpnd:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $addr");
+ list<dag> Pattern = [(set ROWD:$wd, (TyNode (OpNode Addr:$addr)))];
+ InstrItinClass Itinerary = itin;
+ string DecoderMethod = "DecodeMSA128Mem";
+}
+
+class LD_B_DESC : LD_DESC_BASE<"ld.b", load, v16i8, MSA128BOpnd>;
+class LD_H_DESC : LD_DESC_BASE<"ld.h", load, v8i16, MSA128HOpnd>;
+class LD_W_DESC : LD_DESC_BASE<"ld.w", load, v4i32, MSA128WOpnd>;
+class LD_D_DESC : LD_DESC_BASE<"ld.d", load, v2i64, MSA128DOpnd>;
+
+class LDI_B_DESC : MSA_I10_LDI_DESC_BASE<"ldi.b", MSA128BOpnd>;
+class LDI_H_DESC : MSA_I10_LDI_DESC_BASE<"ldi.h", MSA128HOpnd>;
+class LDI_W_DESC : MSA_I10_LDI_DESC_BASE<"ldi.w", MSA128WOpnd>;
+class LDI_D_DESC : MSA_I10_LDI_DESC_BASE<"ldi.d", MSA128DOpnd>;
+
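+// LSA computes a scaled address: the pattern below sets $rd = $rs + ($rt << $sa).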
+class LSA_DESC {
+ dag OutOperandList = (outs GPR32Opnd:$rd);
+ dag InOperandList = (ins GPR32Opnd:$rs, GPR32Opnd:$rt, LSAImm:$sa);
+ string AsmString = "lsa\t$rd, $rs, $rt, $sa";
+ list<dag> Pattern = [(set GPR32Opnd:$rd, (add GPR32Opnd:$rs,
+ (shl GPR32Opnd:$rt,
+ immZExt2Lsa:$sa)))];
+ InstrItinClass Itinerary = NoItinerary;
+}
+
+class MADD_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.h", int_mips_madd_q_h,
+ MSA128HOpnd>;
+class MADD_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"madd_q.w", int_mips_madd_q_w,
+ MSA128WOpnd>;
+
+class MADDR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.h", int_mips_maddr_q_h,
+ MSA128HOpnd>;
+class MADDR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"maddr_q.w", int_mips_maddr_q_w,
+ MSA128WOpnd>;
+
+class MADDV_B_DESC : MSA_3R_4R_DESC_BASE<"maddv.b", muladd, MSA128BOpnd>;
+class MADDV_H_DESC : MSA_3R_4R_DESC_BASE<"maddv.h", muladd, MSA128HOpnd>;
+class MADDV_W_DESC : MSA_3R_4R_DESC_BASE<"maddv.w", muladd, MSA128WOpnd>;
+class MADDV_D_DESC : MSA_3R_4R_DESC_BASE<"maddv.d", muladd, MSA128DOpnd>;
+
+class MAX_A_B_DESC : MSA_3R_DESC_BASE<"max_a.b", int_mips_max_a_b, MSA128BOpnd>;
+class MAX_A_H_DESC : MSA_3R_DESC_BASE<"max_a.h", int_mips_max_a_h, MSA128HOpnd>;
+class MAX_A_W_DESC : MSA_3R_DESC_BASE<"max_a.w", int_mips_max_a_w, MSA128WOpnd>;
+class MAX_A_D_DESC : MSA_3R_DESC_BASE<"max_a.d", int_mips_max_a_d, MSA128DOpnd>;
+
+class MAX_S_B_DESC : MSA_3R_DESC_BASE<"max_s.b", MipsVSMax, MSA128BOpnd>;
+class MAX_S_H_DESC : MSA_3R_DESC_BASE<"max_s.h", MipsVSMax, MSA128HOpnd>;
+class MAX_S_W_DESC : MSA_3R_DESC_BASE<"max_s.w", MipsVSMax, MSA128WOpnd>;
+class MAX_S_D_DESC : MSA_3R_DESC_BASE<"max_s.d", MipsVSMax, MSA128DOpnd>;
+
+class MAX_U_B_DESC : MSA_3R_DESC_BASE<"max_u.b", MipsVUMax, MSA128BOpnd>;
+class MAX_U_H_DESC : MSA_3R_DESC_BASE<"max_u.h", MipsVUMax, MSA128HOpnd>;
+class MAX_U_W_DESC : MSA_3R_DESC_BASE<"max_u.w", MipsVUMax, MSA128WOpnd>;
+class MAX_U_D_DESC : MSA_3R_DESC_BASE<"max_u.d", MipsVUMax, MSA128DOpnd>;
+
+class MAXI_S_B_DESC : MSA_I5_DESC_BASE<"maxi_s.b", MipsVSMax, vsplati8_simm5,
+ MSA128BOpnd>;
+class MAXI_S_H_DESC : MSA_I5_DESC_BASE<"maxi_s.h", MipsVSMax, vsplati16_simm5,
+ MSA128HOpnd>;
+class MAXI_S_W_DESC : MSA_I5_DESC_BASE<"maxi_s.w", MipsVSMax, vsplati32_simm5,
+ MSA128WOpnd>;
+class MAXI_S_D_DESC : MSA_I5_DESC_BASE<"maxi_s.d", MipsVSMax, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class MAXI_U_B_DESC : MSA_I5_DESC_BASE<"maxi_u.b", MipsVUMax, vsplati8_uimm5,
+ MSA128BOpnd>;
+class MAXI_U_H_DESC : MSA_I5_DESC_BASE<"maxi_u.h", MipsVUMax, vsplati16_uimm5,
+ MSA128HOpnd>;
+class MAXI_U_W_DESC : MSA_I5_DESC_BASE<"maxi_u.w", MipsVUMax, vsplati32_uimm5,
+ MSA128WOpnd>;
+class MAXI_U_D_DESC : MSA_I5_DESC_BASE<"maxi_u.d", MipsVUMax, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class MIN_A_B_DESC : MSA_3R_DESC_BASE<"min_a.b", int_mips_min_a_b, MSA128BOpnd>;
+class MIN_A_H_DESC : MSA_3R_DESC_BASE<"min_a.h", int_mips_min_a_h, MSA128HOpnd>;
+class MIN_A_W_DESC : MSA_3R_DESC_BASE<"min_a.w", int_mips_min_a_w, MSA128WOpnd>;
+class MIN_A_D_DESC : MSA_3R_DESC_BASE<"min_a.d", int_mips_min_a_d, MSA128DOpnd>;
+
+class MIN_S_B_DESC : MSA_3R_DESC_BASE<"min_s.b", MipsVSMin, MSA128BOpnd>;
+class MIN_S_H_DESC : MSA_3R_DESC_BASE<"min_s.h", MipsVSMin, MSA128HOpnd>;
+class MIN_S_W_DESC : MSA_3R_DESC_BASE<"min_s.w", MipsVSMin, MSA128WOpnd>;
+class MIN_S_D_DESC : MSA_3R_DESC_BASE<"min_s.d", MipsVSMin, MSA128DOpnd>;
+
+class MIN_U_B_DESC : MSA_3R_DESC_BASE<"min_u.b", MipsVUMin, MSA128BOpnd>;
+class MIN_U_H_DESC : MSA_3R_DESC_BASE<"min_u.h", MipsVUMin, MSA128HOpnd>;
+class MIN_U_W_DESC : MSA_3R_DESC_BASE<"min_u.w", MipsVUMin, MSA128WOpnd>;
+class MIN_U_D_DESC : MSA_3R_DESC_BASE<"min_u.d", MipsVUMin, MSA128DOpnd>;
+
+class MINI_S_B_DESC : MSA_I5_DESC_BASE<"mini_s.b", MipsVSMin, vsplati8_simm5,
+ MSA128BOpnd>;
+class MINI_S_H_DESC : MSA_I5_DESC_BASE<"mini_s.h", MipsVSMin, vsplati16_simm5,
+ MSA128HOpnd>;
+class MINI_S_W_DESC : MSA_I5_DESC_BASE<"mini_s.w", MipsVSMin, vsplati32_simm5,
+ MSA128WOpnd>;
+class MINI_S_D_DESC : MSA_I5_DESC_BASE<"mini_s.d", MipsVSMin, vsplati64_simm5,
+ MSA128DOpnd>;
+
+class MINI_U_B_DESC : MSA_I5_DESC_BASE<"mini_u.b", MipsVUMin, vsplati8_uimm5,
+ MSA128BOpnd>;
+class MINI_U_H_DESC : MSA_I5_DESC_BASE<"mini_u.h", MipsVUMin, vsplati16_uimm5,
+ MSA128HOpnd>;
+class MINI_U_W_DESC : MSA_I5_DESC_BASE<"mini_u.w", MipsVUMin, vsplati32_uimm5,
+ MSA128WOpnd>;
+class MINI_U_D_DESC : MSA_I5_DESC_BASE<"mini_u.d", MipsVUMin, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class MOD_S_B_DESC : MSA_3R_DESC_BASE<"mod_s.b", srem, MSA128BOpnd>;
+class MOD_S_H_DESC : MSA_3R_DESC_BASE<"mod_s.h", srem, MSA128HOpnd>;
+class MOD_S_W_DESC : MSA_3R_DESC_BASE<"mod_s.w", srem, MSA128WOpnd>;
+class MOD_S_D_DESC : MSA_3R_DESC_BASE<"mod_s.d", srem, MSA128DOpnd>;
+
+class MOD_U_B_DESC : MSA_3R_DESC_BASE<"mod_u.b", urem, MSA128BOpnd>;
+class MOD_U_H_DESC : MSA_3R_DESC_BASE<"mod_u.h", urem, MSA128HOpnd>;
+class MOD_U_W_DESC : MSA_3R_DESC_BASE<"mod_u.w", urem, MSA128WOpnd>;
+class MOD_U_D_DESC : MSA_3R_DESC_BASE<"mod_u.d", urem, MSA128DOpnd>;
+
+class MOVE_V_DESC {
+ dag OutOperandList = (outs MSA128BOpnd:$wd);
+ dag InOperandList = (ins MSA128BOpnd:$ws);
+ string AsmString = "move.v\t$wd, $ws";
+ list<dag> Pattern = [];
+ InstrItinClass Itinerary = NoItinerary;
+}
+
+class MSUB_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.h", int_mips_msub_q_h,
+ MSA128HOpnd>;
+class MSUB_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msub_q.w", int_mips_msub_q_w,
+ MSA128WOpnd>;
+
+class MSUBR_Q_H_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.h", int_mips_msubr_q_h,
+ MSA128HOpnd>;
+class MSUBR_Q_W_DESC : MSA_3RF_4RF_DESC_BASE<"msubr_q.w", int_mips_msubr_q_w,
+ MSA128WOpnd>;
+
+class MSUBV_B_DESC : MSA_3R_4R_DESC_BASE<"msubv.b", mulsub, MSA128BOpnd>;
+class MSUBV_H_DESC : MSA_3R_4R_DESC_BASE<"msubv.h", mulsub, MSA128HOpnd>;
+class MSUBV_W_DESC : MSA_3R_4R_DESC_BASE<"msubv.w", mulsub, MSA128WOpnd>;
+class MSUBV_D_DESC : MSA_3R_4R_DESC_BASE<"msubv.d", mulsub, MSA128DOpnd>;
+
+class MUL_Q_H_DESC : MSA_3RF_DESC_BASE<"mul_q.h", int_mips_mul_q_h,
+ MSA128HOpnd>;
+class MUL_Q_W_DESC : MSA_3RF_DESC_BASE<"mul_q.w", int_mips_mul_q_w,
+ MSA128WOpnd>;
+
+class MULR_Q_H_DESC : MSA_3RF_DESC_BASE<"mulr_q.h", int_mips_mulr_q_h,
+ MSA128HOpnd>;
+class MULR_Q_W_DESC : MSA_3RF_DESC_BASE<"mulr_q.w", int_mips_mulr_q_w,
+ MSA128WOpnd>;
+
+class MULV_B_DESC : MSA_3R_DESC_BASE<"mulv.b", mul, MSA128BOpnd>;
+class MULV_H_DESC : MSA_3R_DESC_BASE<"mulv.h", mul, MSA128HOpnd>;
+class MULV_W_DESC : MSA_3R_DESC_BASE<"mulv.w", mul, MSA128WOpnd>;
+class MULV_D_DESC : MSA_3R_DESC_BASE<"mulv.d", mul, MSA128DOpnd>;
+
+class NLOC_B_DESC : MSA_2R_DESC_BASE<"nloc.b", int_mips_nloc_b, MSA128BOpnd>;
+class NLOC_H_DESC : MSA_2R_DESC_BASE<"nloc.h", int_mips_nloc_h, MSA128HOpnd>;
+class NLOC_W_DESC : MSA_2R_DESC_BASE<"nloc.w", int_mips_nloc_w, MSA128WOpnd>;
+class NLOC_D_DESC : MSA_2R_DESC_BASE<"nloc.d", int_mips_nloc_d, MSA128DOpnd>;
+
+class NLZC_B_DESC : MSA_2R_DESC_BASE<"nlzc.b", ctlz, MSA128BOpnd>;
+class NLZC_H_DESC : MSA_2R_DESC_BASE<"nlzc.h", ctlz, MSA128HOpnd>;
+class NLZC_W_DESC : MSA_2R_DESC_BASE<"nlzc.w", ctlz, MSA128WOpnd>;
+class NLZC_D_DESC : MSA_2R_DESC_BASE<"nlzc.d", ctlz, MSA128DOpnd>;
+
+class NOR_V_DESC : MSA_VEC_DESC_BASE<"nor.v", MipsVNOR, MSA128BOpnd>;
+class NOR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128HOpnd>;
+class NOR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128WOpnd>;
+class NOR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<MipsVNOR, MSA128DOpnd>;
+
+class NORI_B_DESC : MSA_I8_DESC_BASE<"nori.b", MipsVNOR, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+class OR_V_DESC : MSA_VEC_DESC_BASE<"or.v", or, MSA128BOpnd>;
+class OR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128HOpnd>;
+class OR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128WOpnd>;
+class OR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<or, MSA128DOpnd>;
+
+class ORI_B_DESC : MSA_I8_DESC_BASE<"ori.b", or, vsplati8_uimm8, MSA128BOpnd>;
+
+class PCKEV_B_DESC : MSA_3R_DESC_BASE<"pckev.b", MipsPCKEV, MSA128BOpnd>;
+class PCKEV_H_DESC : MSA_3R_DESC_BASE<"pckev.h", MipsPCKEV, MSA128HOpnd>;
+class PCKEV_W_DESC : MSA_3R_DESC_BASE<"pckev.w", MipsPCKEV, MSA128WOpnd>;
+class PCKEV_D_DESC : MSA_3R_DESC_BASE<"pckev.d", MipsPCKEV, MSA128DOpnd>;
+
+class PCKOD_B_DESC : MSA_3R_DESC_BASE<"pckod.b", MipsPCKOD, MSA128BOpnd>;
+class PCKOD_H_DESC : MSA_3R_DESC_BASE<"pckod.h", MipsPCKOD, MSA128HOpnd>;
+class PCKOD_W_DESC : MSA_3R_DESC_BASE<"pckod.w", MipsPCKOD, MSA128WOpnd>;
+class PCKOD_D_DESC : MSA_3R_DESC_BASE<"pckod.d", MipsPCKOD, MSA128DOpnd>;
+
+class PCNT_B_DESC : MSA_2R_DESC_BASE<"pcnt.b", ctpop, MSA128BOpnd>;
+class PCNT_H_DESC : MSA_2R_DESC_BASE<"pcnt.h", ctpop, MSA128HOpnd>;
+class PCNT_W_DESC : MSA_2R_DESC_BASE<"pcnt.w", ctpop, MSA128WOpnd>;
+class PCNT_D_DESC : MSA_2R_DESC_BASE<"pcnt.d", ctpop, MSA128DOpnd>;
+
+class SAT_S_B_DESC : MSA_BIT_B_X_DESC_BASE<"sat_s.b", int_mips_sat_s_b,
+ MSA128BOpnd>;
+class SAT_S_H_DESC : MSA_BIT_H_X_DESC_BASE<"sat_s.h", int_mips_sat_s_h,
+ MSA128HOpnd>;
+class SAT_S_W_DESC : MSA_BIT_W_X_DESC_BASE<"sat_s.w", int_mips_sat_s_w,
+ MSA128WOpnd>;
+class SAT_S_D_DESC : MSA_BIT_D_X_DESC_BASE<"sat_s.d", int_mips_sat_s_d,
+ MSA128DOpnd>;
+
+class SAT_U_B_DESC : MSA_BIT_B_X_DESC_BASE<"sat_u.b", int_mips_sat_u_b,
+ MSA128BOpnd>;
+class SAT_U_H_DESC : MSA_BIT_H_X_DESC_BASE<"sat_u.h", int_mips_sat_u_h,
+ MSA128HOpnd>;
+class SAT_U_W_DESC : MSA_BIT_W_X_DESC_BASE<"sat_u.w", int_mips_sat_u_w,
+ MSA128WOpnd>;
+class SAT_U_D_DESC : MSA_BIT_D_X_DESC_BASE<"sat_u.d", int_mips_sat_u_d,
+ MSA128DOpnd>;
+
+class SHF_B_DESC : MSA_I8_SHF_DESC_BASE<"shf.b", MSA128BOpnd>;
+class SHF_H_DESC : MSA_I8_SHF_DESC_BASE<"shf.h", MSA128HOpnd>;
+class SHF_W_DESC : MSA_I8_SHF_DESC_BASE<"shf.w", MSA128WOpnd>;
+
+class SLD_B_DESC : MSA_3R_SLD_DESC_BASE<"sld.b", int_mips_sld_b, MSA128BOpnd>;
+class SLD_H_DESC : MSA_3R_SLD_DESC_BASE<"sld.h", int_mips_sld_h, MSA128HOpnd>;
+class SLD_W_DESC : MSA_3R_SLD_DESC_BASE<"sld.w", int_mips_sld_w, MSA128WOpnd>;
+class SLD_D_DESC : MSA_3R_SLD_DESC_BASE<"sld.d", int_mips_sld_d, MSA128DOpnd>;
+
+class SLDI_B_DESC : MSA_ELM_DESC_BASE<"sldi.b", int_mips_sldi_b, MSA128BOpnd>;
+class SLDI_H_DESC : MSA_ELM_DESC_BASE<"sldi.h", int_mips_sldi_h, MSA128HOpnd>;
+class SLDI_W_DESC : MSA_ELM_DESC_BASE<"sldi.w", int_mips_sldi_w, MSA128WOpnd>;
+class SLDI_D_DESC : MSA_ELM_DESC_BASE<"sldi.d", int_mips_sldi_d, MSA128DOpnd>;
+
+class SLL_B_DESC : MSA_3R_DESC_BASE<"sll.b", shl, MSA128BOpnd>;
+class SLL_H_DESC : MSA_3R_DESC_BASE<"sll.h", shl, MSA128HOpnd>;
+class SLL_W_DESC : MSA_3R_DESC_BASE<"sll.w", shl, MSA128WOpnd>;
+class SLL_D_DESC : MSA_3R_DESC_BASE<"sll.d", shl, MSA128DOpnd>;
+
+class SLLI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.b", shl, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SLLI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.h", shl, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SLLI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.w", shl, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SLLI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"slli.d", shl, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SPLAT_B_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.b", vsplati8_elt,
+ MSA128BOpnd>;
+class SPLAT_H_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.h", vsplati16_elt,
+ MSA128HOpnd>;
+class SPLAT_W_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.w", vsplati32_elt,
+ MSA128WOpnd>;
+class SPLAT_D_DESC : MSA_3R_SPLAT_DESC_BASE<"splat.d", vsplati64_elt,
+ MSA128DOpnd>;
+
+class SPLATI_B_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.b", vsplati8_uimm4,
+ MSA128BOpnd>;
+class SPLATI_H_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.h", vsplati16_uimm3,
+ MSA128HOpnd>;
+class SPLATI_W_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.w", vsplati32_uimm2,
+ MSA128WOpnd>;
+class SPLATI_D_DESC : MSA_ELM_SPLAT_DESC_BASE<"splati.d", vsplati64_uimm1,
+ MSA128DOpnd>;
+
+class SRA_B_DESC : MSA_3R_DESC_BASE<"sra.b", sra, MSA128BOpnd>;
+class SRA_H_DESC : MSA_3R_DESC_BASE<"sra.h", sra, MSA128HOpnd>;
+class SRA_W_DESC : MSA_3R_DESC_BASE<"sra.w", sra, MSA128WOpnd>;
+class SRA_D_DESC : MSA_3R_DESC_BASE<"sra.d", sra, MSA128DOpnd>;
+
+class SRAI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.b", sra, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SRAI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.h", sra, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SRAI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.w", sra, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SRAI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"srai.d", sra, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SRAR_B_DESC : MSA_3R_DESC_BASE<"srar.b", int_mips_srar_b, MSA128BOpnd>;
+class SRAR_H_DESC : MSA_3R_DESC_BASE<"srar.h", int_mips_srar_h, MSA128HOpnd>;
+class SRAR_W_DESC : MSA_3R_DESC_BASE<"srar.w", int_mips_srar_w, MSA128WOpnd>;
+class SRAR_D_DESC : MSA_3R_DESC_BASE<"srar.d", int_mips_srar_d, MSA128DOpnd>;
+
+class SRARI_B_DESC : MSA_BIT_B_X_DESC_BASE<"srari.b", int_mips_srari_b,
+ MSA128BOpnd>;
+class SRARI_H_DESC : MSA_BIT_H_X_DESC_BASE<"srari.h", int_mips_srari_h,
+ MSA128HOpnd>;
+class SRARI_W_DESC : MSA_BIT_W_X_DESC_BASE<"srari.w", int_mips_srari_w,
+ MSA128WOpnd>;
+class SRARI_D_DESC : MSA_BIT_D_X_DESC_BASE<"srari.d", int_mips_srari_d,
+ MSA128DOpnd>;
+
+class SRL_B_DESC : MSA_3R_DESC_BASE<"srl.b", srl, MSA128BOpnd>;
+class SRL_H_DESC : MSA_3R_DESC_BASE<"srl.h", srl, MSA128HOpnd>;
+class SRL_W_DESC : MSA_3R_DESC_BASE<"srl.w", srl, MSA128WOpnd>;
+class SRL_D_DESC : MSA_3R_DESC_BASE<"srl.d", srl, MSA128DOpnd>;
+
+class SRLI_B_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.b", srl, vsplati8_uimm3,
+ MSA128BOpnd>;
+class SRLI_H_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.h", srl, vsplati16_uimm4,
+ MSA128HOpnd>;
+class SRLI_W_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.w", srl, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SRLI_D_DESC : MSA_BIT_SPLAT_DESC_BASE<"srli.d", srl, vsplati64_uimm6,
+ MSA128DOpnd>;
+
+class SRLR_B_DESC : MSA_3R_DESC_BASE<"srlr.b", int_mips_srlr_b, MSA128BOpnd>;
+class SRLR_H_DESC : MSA_3R_DESC_BASE<"srlr.h", int_mips_srlr_h, MSA128HOpnd>;
+class SRLR_W_DESC : MSA_3R_DESC_BASE<"srlr.w", int_mips_srlr_w, MSA128WOpnd>;
+class SRLR_D_DESC : MSA_3R_DESC_BASE<"srlr.d", int_mips_srlr_d, MSA128DOpnd>;
+
+class SRLRI_B_DESC : MSA_BIT_B_X_DESC_BASE<"srlri.b", int_mips_srlri_b,
+ MSA128BOpnd>;
+class SRLRI_H_DESC : MSA_BIT_H_X_DESC_BASE<"srlri.h", int_mips_srlri_h,
+ MSA128HOpnd>;
+class SRLRI_W_DESC : MSA_BIT_W_X_DESC_BASE<"srlri.w", int_mips_srlri_w,
+ MSA128WOpnd>;
+class SRLRI_D_DESC : MSA_BIT_D_X_DESC_BASE<"srlri.d", int_mips_srlri_d,
+ MSA128DOpnd>;
+
+class ST_DESC_BASE<string instr_asm, SDPatternOperator OpNode,
+ ValueType TyNode, RegisterOperand ROWD,
+ Operand MemOpnd = mem, ComplexPattern Addr = addrRegImm,
+ InstrItinClass itin = NoItinerary> {
+ dag OutOperandList = (outs);
+ dag InOperandList = (ins ROWD:$wd, MemOpnd:$addr);
+ string AsmString = !strconcat(instr_asm, "\t$wd, $addr");
+ list<dag> Pattern = [(OpNode (TyNode ROWD:$wd), Addr:$addr)];
+ InstrItinClass Itinerary = itin;
+ string DecoderMethod = "DecodeMSA128Mem";
+}
+
+class ST_B_DESC : ST_DESC_BASE<"st.b", store, v16i8, MSA128BOpnd>;
+class ST_H_DESC : ST_DESC_BASE<"st.h", store, v8i16, MSA128HOpnd>;
+class ST_W_DESC : ST_DESC_BASE<"st.w", store, v4i32, MSA128WOpnd>;
+class ST_D_DESC : ST_DESC_BASE<"st.d", store, v2i64, MSA128DOpnd>;
+
+class SUBS_S_B_DESC : MSA_3R_DESC_BASE<"subs_s.b", int_mips_subs_s_b,
+ MSA128BOpnd>;
+class SUBS_S_H_DESC : MSA_3R_DESC_BASE<"subs_s.h", int_mips_subs_s_h,
+ MSA128HOpnd>;
+class SUBS_S_W_DESC : MSA_3R_DESC_BASE<"subs_s.w", int_mips_subs_s_w,
+ MSA128WOpnd>;
+class SUBS_S_D_DESC : MSA_3R_DESC_BASE<"subs_s.d", int_mips_subs_s_d,
+ MSA128DOpnd>;
+
+class SUBS_U_B_DESC : MSA_3R_DESC_BASE<"subs_u.b", int_mips_subs_u_b,
+ MSA128BOpnd>;
+class SUBS_U_H_DESC : MSA_3R_DESC_BASE<"subs_u.h", int_mips_subs_u_h,
+ MSA128HOpnd>;
+class SUBS_U_W_DESC : MSA_3R_DESC_BASE<"subs_u.w", int_mips_subs_u_w,
+ MSA128WOpnd>;
+class SUBS_U_D_DESC : MSA_3R_DESC_BASE<"subs_u.d", int_mips_subs_u_d,
+ MSA128DOpnd>;
+
+class SUBSUS_U_B_DESC : MSA_3R_DESC_BASE<"subsus_u.b", int_mips_subsus_u_b,
+ MSA128BOpnd>;
+class SUBSUS_U_H_DESC : MSA_3R_DESC_BASE<"subsus_u.h", int_mips_subsus_u_h,
+ MSA128HOpnd>;
+class SUBSUS_U_W_DESC : MSA_3R_DESC_BASE<"subsus_u.w", int_mips_subsus_u_w,
+ MSA128WOpnd>;
+class SUBSUS_U_D_DESC : MSA_3R_DESC_BASE<"subsus_u.d", int_mips_subsus_u_d,
+ MSA128DOpnd>;
+
+class SUBSUU_S_B_DESC : MSA_3R_DESC_BASE<"subsuu_s.b", int_mips_subsuu_s_b,
+ MSA128BOpnd>;
+class SUBSUU_S_H_DESC : MSA_3R_DESC_BASE<"subsuu_s.h", int_mips_subsuu_s_h,
+ MSA128HOpnd>;
+class SUBSUU_S_W_DESC : MSA_3R_DESC_BASE<"subsuu_s.w", int_mips_subsuu_s_w,
+ MSA128WOpnd>;
+class SUBSUU_S_D_DESC : MSA_3R_DESC_BASE<"subsuu_s.d", int_mips_subsuu_s_d,
+ MSA128DOpnd>;
+
+class SUBV_B_DESC : MSA_3R_DESC_BASE<"subv.b", sub, MSA128BOpnd>;
+class SUBV_H_DESC : MSA_3R_DESC_BASE<"subv.h", sub, MSA128HOpnd>;
+class SUBV_W_DESC : MSA_3R_DESC_BASE<"subv.w", sub, MSA128WOpnd>;
+class SUBV_D_DESC : MSA_3R_DESC_BASE<"subv.d", sub, MSA128DOpnd>;
+
+class SUBVI_B_DESC : MSA_I5_DESC_BASE<"subvi.b", sub, vsplati8_uimm5,
+ MSA128BOpnd>;
+class SUBVI_H_DESC : MSA_I5_DESC_BASE<"subvi.h", sub, vsplati16_uimm5,
+ MSA128HOpnd>;
+class SUBVI_W_DESC : MSA_I5_DESC_BASE<"subvi.w", sub, vsplati32_uimm5,
+ MSA128WOpnd>;
+class SUBVI_D_DESC : MSA_I5_DESC_BASE<"subvi.d", sub, vsplati64_uimm5,
+ MSA128DOpnd>;
+
+class VSHF_B_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.b", MSA128BOpnd>;
+class VSHF_H_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.h", MSA128HOpnd>;
+class VSHF_W_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.w", MSA128WOpnd>;
+class VSHF_D_DESC : MSA_3R_VSHF_DESC_BASE<"vshf.d", MSA128DOpnd>;
+
+class XOR_V_DESC : MSA_VEC_DESC_BASE<"xor.v", xor, MSA128BOpnd>;
+class XOR_V_H_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128HOpnd>;
+class XOR_V_W_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128WOpnd>;
+class XOR_V_D_PSEUDO_DESC : MSA_VEC_PSEUDO_BASE<xor, MSA128DOpnd>;
+
+class XORI_B_DESC : MSA_I8_DESC_BASE<"xori.b", xor, vsplati8_uimm8,
+ MSA128BOpnd>;
+
+// Instruction defs.
+def ADD_A_B : ADD_A_B_ENC, ADD_A_B_DESC;
+def ADD_A_H : ADD_A_H_ENC, ADD_A_H_DESC;
+def ADD_A_W : ADD_A_W_ENC, ADD_A_W_DESC;
+def ADD_A_D : ADD_A_D_ENC, ADD_A_D_DESC;
+
+def ADDS_A_B : ADDS_A_B_ENC, ADDS_A_B_DESC;
+def ADDS_A_H : ADDS_A_H_ENC, ADDS_A_H_DESC;
+def ADDS_A_W : ADDS_A_W_ENC, ADDS_A_W_DESC;
+def ADDS_A_D : ADDS_A_D_ENC, ADDS_A_D_DESC;
+
+def ADDS_S_B : ADDS_S_B_ENC, ADDS_S_B_DESC;
+def ADDS_S_H : ADDS_S_H_ENC, ADDS_S_H_DESC;
+def ADDS_S_W : ADDS_S_W_ENC, ADDS_S_W_DESC;
+def ADDS_S_D : ADDS_S_D_ENC, ADDS_S_D_DESC;
+
+def ADDS_U_B : ADDS_U_B_ENC, ADDS_U_B_DESC;
+def ADDS_U_H : ADDS_U_H_ENC, ADDS_U_H_DESC;
+def ADDS_U_W : ADDS_U_W_ENC, ADDS_U_W_DESC;
+def ADDS_U_D : ADDS_U_D_ENC, ADDS_U_D_DESC;
+
+def ADDV_B : ADDV_B_ENC, ADDV_B_DESC;
+def ADDV_H : ADDV_H_ENC, ADDV_H_DESC;
+def ADDV_W : ADDV_W_ENC, ADDV_W_DESC;
+def ADDV_D : ADDV_D_ENC, ADDV_D_DESC;
+
+def ADDVI_B : ADDVI_B_ENC, ADDVI_B_DESC;
+def ADDVI_H : ADDVI_H_ENC, ADDVI_H_DESC;
+def ADDVI_W : ADDVI_W_ENC, ADDVI_W_DESC;
+def ADDVI_D : ADDVI_D_ENC, ADDVI_D_DESC;
+
+def AND_V : AND_V_ENC, AND_V_DESC;
+def AND_V_H_PSEUDO : AND_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def AND_V_W_PSEUDO : AND_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def AND_V_D_PSEUDO : AND_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(AND_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def ANDI_B : ANDI_B_ENC, ANDI_B_DESC;
+
+def ASUB_S_B : ASUB_S_B_ENC, ASUB_S_B_DESC;
+def ASUB_S_H : ASUB_S_H_ENC, ASUB_S_H_DESC;
+def ASUB_S_W : ASUB_S_W_ENC, ASUB_S_W_DESC;
+def ASUB_S_D : ASUB_S_D_ENC, ASUB_S_D_DESC;
+
+def ASUB_U_B : ASUB_U_B_ENC, ASUB_U_B_DESC;
+def ASUB_U_H : ASUB_U_H_ENC, ASUB_U_H_DESC;
+def ASUB_U_W : ASUB_U_W_ENC, ASUB_U_W_DESC;
+def ASUB_U_D : ASUB_U_D_ENC, ASUB_U_D_DESC;
+
+def AVE_S_B : AVE_S_B_ENC, AVE_S_B_DESC;
+def AVE_S_H : AVE_S_H_ENC, AVE_S_H_DESC;
+def AVE_S_W : AVE_S_W_ENC, AVE_S_W_DESC;
+def AVE_S_D : AVE_S_D_ENC, AVE_S_D_DESC;
+
+def AVE_U_B : AVE_U_B_ENC, AVE_U_B_DESC;
+def AVE_U_H : AVE_U_H_ENC, AVE_U_H_DESC;
+def AVE_U_W : AVE_U_W_ENC, AVE_U_W_DESC;
+def AVE_U_D : AVE_U_D_ENC, AVE_U_D_DESC;
+
+def AVER_S_B : AVER_S_B_ENC, AVER_S_B_DESC;
+def AVER_S_H : AVER_S_H_ENC, AVER_S_H_DESC;
+def AVER_S_W : AVER_S_W_ENC, AVER_S_W_DESC;
+def AVER_S_D : AVER_S_D_ENC, AVER_S_D_DESC;
+
+def AVER_U_B : AVER_U_B_ENC, AVER_U_B_DESC;
+def AVER_U_H : AVER_U_H_ENC, AVER_U_H_DESC;
+def AVER_U_W : AVER_U_W_ENC, AVER_U_W_DESC;
+def AVER_U_D : AVER_U_D_ENC, AVER_U_D_DESC;
+
+def BCLR_B : BCLR_B_ENC, BCLR_B_DESC;
+def BCLR_H : BCLR_H_ENC, BCLR_H_DESC;
+def BCLR_W : BCLR_W_ENC, BCLR_W_DESC;
+def BCLR_D : BCLR_D_ENC, BCLR_D_DESC;
+
+def BCLRI_B : BCLRI_B_ENC, BCLRI_B_DESC;
+def BCLRI_H : BCLRI_H_ENC, BCLRI_H_DESC;
+def BCLRI_W : BCLRI_W_ENC, BCLRI_W_DESC;
+def BCLRI_D : BCLRI_D_ENC, BCLRI_D_DESC;
+
+def BINSL_B : BINSL_B_ENC, BINSL_B_DESC;
+def BINSL_H : BINSL_H_ENC, BINSL_H_DESC;
+def BINSL_W : BINSL_W_ENC, BINSL_W_DESC;
+def BINSL_D : BINSL_D_ENC, BINSL_D_DESC;
+
+def BINSLI_B : BINSLI_B_ENC, BINSLI_B_DESC;
+def BINSLI_H : BINSLI_H_ENC, BINSLI_H_DESC;
+def BINSLI_W : BINSLI_W_ENC, BINSLI_W_DESC;
+def BINSLI_D : BINSLI_D_ENC, BINSLI_D_DESC;
+
+def BINSR_B : BINSR_B_ENC, BINSR_B_DESC;
+def BINSR_H : BINSR_H_ENC, BINSR_H_DESC;
+def BINSR_W : BINSR_W_ENC, BINSR_W_DESC;
+def BINSR_D : BINSR_D_ENC, BINSR_D_DESC;
+
+def BINSRI_B : BINSRI_B_ENC, BINSRI_B_DESC;
+def BINSRI_H : BINSRI_H_ENC, BINSRI_H_DESC;
+def BINSRI_W : BINSRI_W_ENC, BINSRI_W_DESC;
+def BINSRI_D : BINSRI_D_ENC, BINSRI_D_DESC;
+
+def BMNZ_V : BMNZ_V_ENC, BMNZ_V_DESC;
+
+def BMNZI_B : BMNZI_B_ENC, BMNZI_B_DESC;
+
+def BMZ_V : BMZ_V_ENC, BMZ_V_DESC;
+
+def BMZI_B : BMZI_B_ENC, BMZI_B_DESC;
+
+def BNEG_B : BNEG_B_ENC, BNEG_B_DESC;
+def BNEG_H : BNEG_H_ENC, BNEG_H_DESC;
+def BNEG_W : BNEG_W_ENC, BNEG_W_DESC;
+def BNEG_D : BNEG_D_ENC, BNEG_D_DESC;
+
+def BNEGI_B : BNEGI_B_ENC, BNEGI_B_DESC;
+def BNEGI_H : BNEGI_H_ENC, BNEGI_H_DESC;
+def BNEGI_W : BNEGI_W_ENC, BNEGI_W_DESC;
+def BNEGI_D : BNEGI_D_ENC, BNEGI_D_DESC;
+
+def BNZ_B : BNZ_B_ENC, BNZ_B_DESC;
+def BNZ_H : BNZ_H_ENC, BNZ_H_DESC;
+def BNZ_W : BNZ_W_ENC, BNZ_W_DESC;
+def BNZ_D : BNZ_D_ENC, BNZ_D_DESC;
+
+def BNZ_V : BNZ_V_ENC, BNZ_V_DESC;
+
+def BSEL_V : BSEL_V_ENC, BSEL_V_DESC;
+
+class MSA_BSEL_PSEUDO_BASE<RegisterOperand RO, ValueType Ty> :
+ MipsPseudo<(outs RO:$wd), (ins RO:$wd_in, RO:$ws, RO:$wt),
+ [(set RO:$wd, (Ty (vselect RO:$wd_in, RO:$ws, RO:$wt)))]>,
+ PseudoInstExpansion<(BSEL_V MSA128BOpnd:$wd, MSA128BOpnd:$wd_in,
+ MSA128BOpnd:$ws, MSA128BOpnd:$wt)> {
+ let Constraints = "$wd_in = $wd";
+}
+
+def BSEL_H_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128HOpnd, v8i16>;
+def BSEL_W_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128WOpnd, v4i32>;
+def BSEL_D_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128DOpnd, v2i64>;
+def BSEL_FW_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128WOpnd, v4f32>;
+def BSEL_FD_PSEUDO : MSA_BSEL_PSEUDO_BASE<MSA128DOpnd, v2f64>;
+
+def BSELI_B : BSELI_B_ENC, BSELI_B_DESC;
+
+def BSET_B : BSET_B_ENC, BSET_B_DESC;
+def BSET_H : BSET_H_ENC, BSET_H_DESC;
+def BSET_W : BSET_W_ENC, BSET_W_DESC;
+def BSET_D : BSET_D_ENC, BSET_D_DESC;
+
+def BSETI_B : BSETI_B_ENC, BSETI_B_DESC;
+def BSETI_H : BSETI_H_ENC, BSETI_H_DESC;
+def BSETI_W : BSETI_W_ENC, BSETI_W_DESC;
+def BSETI_D : BSETI_D_ENC, BSETI_D_DESC;
+
+def BZ_B : BZ_B_ENC, BZ_B_DESC;
+def BZ_H : BZ_H_ENC, BZ_H_DESC;
+def BZ_W : BZ_W_ENC, BZ_W_DESC;
+def BZ_D : BZ_D_ENC, BZ_D_DESC;
+
+def BZ_V : BZ_V_ENC, BZ_V_DESC;
+
+def CEQ_B : CEQ_B_ENC, CEQ_B_DESC;
+def CEQ_H : CEQ_H_ENC, CEQ_H_DESC;
+def CEQ_W : CEQ_W_ENC, CEQ_W_DESC;
+def CEQ_D : CEQ_D_ENC, CEQ_D_DESC;
+
+def CEQI_B : CEQI_B_ENC, CEQI_B_DESC;
+def CEQI_H : CEQI_H_ENC, CEQI_H_DESC;
+def CEQI_W : CEQI_W_ENC, CEQI_W_DESC;
+def CEQI_D : CEQI_D_ENC, CEQI_D_DESC;
+
+def CFCMSA : CFCMSA_ENC, CFCMSA_DESC;
+
+def CLE_S_B : CLE_S_B_ENC, CLE_S_B_DESC;
+def CLE_S_H : CLE_S_H_ENC, CLE_S_H_DESC;
+def CLE_S_W : CLE_S_W_ENC, CLE_S_W_DESC;
+def CLE_S_D : CLE_S_D_ENC, CLE_S_D_DESC;
+
+def CLE_U_B : CLE_U_B_ENC, CLE_U_B_DESC;
+def CLE_U_H : CLE_U_H_ENC, CLE_U_H_DESC;
+def CLE_U_W : CLE_U_W_ENC, CLE_U_W_DESC;
+def CLE_U_D : CLE_U_D_ENC, CLE_U_D_DESC;
+
+def CLEI_S_B : CLEI_S_B_ENC, CLEI_S_B_DESC;
+def CLEI_S_H : CLEI_S_H_ENC, CLEI_S_H_DESC;
+def CLEI_S_W : CLEI_S_W_ENC, CLEI_S_W_DESC;
+def CLEI_S_D : CLEI_S_D_ENC, CLEI_S_D_DESC;
+
+def CLEI_U_B : CLEI_U_B_ENC, CLEI_U_B_DESC;
+def CLEI_U_H : CLEI_U_H_ENC, CLEI_U_H_DESC;
+def CLEI_U_W : CLEI_U_W_ENC, CLEI_U_W_DESC;
+def CLEI_U_D : CLEI_U_D_ENC, CLEI_U_D_DESC;
+
+def CLT_S_B : CLT_S_B_ENC, CLT_S_B_DESC;
+def CLT_S_H : CLT_S_H_ENC, CLT_S_H_DESC;
+def CLT_S_W : CLT_S_W_ENC, CLT_S_W_DESC;
+def CLT_S_D : CLT_S_D_ENC, CLT_S_D_DESC;
+
+def CLT_U_B : CLT_U_B_ENC, CLT_U_B_DESC;
+def CLT_U_H : CLT_U_H_ENC, CLT_U_H_DESC;
+def CLT_U_W : CLT_U_W_ENC, CLT_U_W_DESC;
+def CLT_U_D : CLT_U_D_ENC, CLT_U_D_DESC;
+
+def CLTI_S_B : CLTI_S_B_ENC, CLTI_S_B_DESC;
+def CLTI_S_H : CLTI_S_H_ENC, CLTI_S_H_DESC;
+def CLTI_S_W : CLTI_S_W_ENC, CLTI_S_W_DESC;
+def CLTI_S_D : CLTI_S_D_ENC, CLTI_S_D_DESC;
+
+def CLTI_U_B : CLTI_U_B_ENC, CLTI_U_B_DESC;
+def CLTI_U_H : CLTI_U_H_ENC, CLTI_U_H_DESC;
+def CLTI_U_W : CLTI_U_W_ENC, CLTI_U_W_DESC;
+def CLTI_U_D : CLTI_U_D_ENC, CLTI_U_D_DESC;
+
+def COPY_S_B : COPY_S_B_ENC, COPY_S_B_DESC;
+def COPY_S_H : COPY_S_H_ENC, COPY_S_H_DESC;
+def COPY_S_W : COPY_S_W_ENC, COPY_S_W_DESC;
+
+def COPY_U_B : COPY_U_B_ENC, COPY_U_B_DESC;
+def COPY_U_H : COPY_U_H_ENC, COPY_U_H_DESC;
+def COPY_U_W : COPY_U_W_ENC, COPY_U_W_DESC;
+
+def COPY_FW_PSEUDO : COPY_FW_PSEUDO_DESC;
+def COPY_FD_PSEUDO : COPY_FD_PSEUDO_DESC;
+
+def CTCMSA : CTCMSA_ENC, CTCMSA_DESC;
+
+def DIV_S_B : DIV_S_B_ENC, DIV_S_B_DESC;
+def DIV_S_H : DIV_S_H_ENC, DIV_S_H_DESC;
+def DIV_S_W : DIV_S_W_ENC, DIV_S_W_DESC;
+def DIV_S_D : DIV_S_D_ENC, DIV_S_D_DESC;
+
+def DIV_U_B : DIV_U_B_ENC, DIV_U_B_DESC;
+def DIV_U_H : DIV_U_H_ENC, DIV_U_H_DESC;
+def DIV_U_W : DIV_U_W_ENC, DIV_U_W_DESC;
+def DIV_U_D : DIV_U_D_ENC, DIV_U_D_DESC;
+
+def DOTP_S_H : DOTP_S_H_ENC, DOTP_S_H_DESC;
+def DOTP_S_W : DOTP_S_W_ENC, DOTP_S_W_DESC;
+def DOTP_S_D : DOTP_S_D_ENC, DOTP_S_D_DESC;
+
+def DOTP_U_H : DOTP_U_H_ENC, DOTP_U_H_DESC;
+def DOTP_U_W : DOTP_U_W_ENC, DOTP_U_W_DESC;
+def DOTP_U_D : DOTP_U_D_ENC, DOTP_U_D_DESC;
+
+def DPADD_S_H : DPADD_S_H_ENC, DPADD_S_H_DESC;
+def DPADD_S_W : DPADD_S_W_ENC, DPADD_S_W_DESC;
+def DPADD_S_D : DPADD_S_D_ENC, DPADD_S_D_DESC;
+
+def DPADD_U_H : DPADD_U_H_ENC, DPADD_U_H_DESC;
+def DPADD_U_W : DPADD_U_W_ENC, DPADD_U_W_DESC;
+def DPADD_U_D : DPADD_U_D_ENC, DPADD_U_D_DESC;
+
+def DPSUB_S_H : DPSUB_S_H_ENC, DPSUB_S_H_DESC;
+def DPSUB_S_W : DPSUB_S_W_ENC, DPSUB_S_W_DESC;
+def DPSUB_S_D : DPSUB_S_D_ENC, DPSUB_S_D_DESC;
+
+def DPSUB_U_H : DPSUB_U_H_ENC, DPSUB_U_H_DESC;
+def DPSUB_U_W : DPSUB_U_W_ENC, DPSUB_U_W_DESC;
+def DPSUB_U_D : DPSUB_U_D_ENC, DPSUB_U_D_DESC;
+
+def FADD_W : FADD_W_ENC, FADD_W_DESC;
+def FADD_D : FADD_D_ENC, FADD_D_DESC;
+
+def FCAF_W : FCAF_W_ENC, FCAF_W_DESC;
+def FCAF_D : FCAF_D_ENC, FCAF_D_DESC;
+
+def FCEQ_W : FCEQ_W_ENC, FCEQ_W_DESC;
+def FCEQ_D : FCEQ_D_ENC, FCEQ_D_DESC;
+
+def FCLE_W : FCLE_W_ENC, FCLE_W_DESC;
+def FCLE_D : FCLE_D_ENC, FCLE_D_DESC;
+
+def FCLT_W : FCLT_W_ENC, FCLT_W_DESC;
+def FCLT_D : FCLT_D_ENC, FCLT_D_DESC;
+
+def FCLASS_W : FCLASS_W_ENC, FCLASS_W_DESC;
+def FCLASS_D : FCLASS_D_ENC, FCLASS_D_DESC;
+
+def FCNE_W : FCNE_W_ENC, FCNE_W_DESC;
+def FCNE_D : FCNE_D_ENC, FCNE_D_DESC;
+
+def FCOR_W : FCOR_W_ENC, FCOR_W_DESC;
+def FCOR_D : FCOR_D_ENC, FCOR_D_DESC;
+
+def FCUEQ_W : FCUEQ_W_ENC, FCUEQ_W_DESC;
+def FCUEQ_D : FCUEQ_D_ENC, FCUEQ_D_DESC;
+
+def FCULE_W : FCULE_W_ENC, FCULE_W_DESC;
+def FCULE_D : FCULE_D_ENC, FCULE_D_DESC;
+
+def FCULT_W : FCULT_W_ENC, FCULT_W_DESC;
+def FCULT_D : FCULT_D_ENC, FCULT_D_DESC;
+
+def FCUN_W : FCUN_W_ENC, FCUN_W_DESC;
+def FCUN_D : FCUN_D_ENC, FCUN_D_DESC;
+
+def FCUNE_W : FCUNE_W_ENC, FCUNE_W_DESC;
+def FCUNE_D : FCUNE_D_ENC, FCUNE_D_DESC;
+
+def FDIV_W : FDIV_W_ENC, FDIV_W_DESC;
+def FDIV_D : FDIV_D_ENC, FDIV_D_DESC;
+
+def FEXDO_H : FEXDO_H_ENC, FEXDO_H_DESC;
+def FEXDO_W : FEXDO_W_ENC, FEXDO_W_DESC;
+
+def FEXP2_W : FEXP2_W_ENC, FEXP2_W_DESC;
+def FEXP2_D : FEXP2_D_ENC, FEXP2_D_DESC;
+def FEXP2_W_1_PSEUDO : FEXP2_W_1_PSEUDO_DESC;
+def FEXP2_D_1_PSEUDO : FEXP2_D_1_PSEUDO_DESC;
+
+def FEXUPL_W : FEXUPL_W_ENC, FEXUPL_W_DESC;
+def FEXUPL_D : FEXUPL_D_ENC, FEXUPL_D_DESC;
+
+def FEXUPR_W : FEXUPR_W_ENC, FEXUPR_W_DESC;
+def FEXUPR_D : FEXUPR_D_ENC, FEXUPR_D_DESC;
+
+def FFINT_S_W : FFINT_S_W_ENC, FFINT_S_W_DESC;
+def FFINT_S_D : FFINT_S_D_ENC, FFINT_S_D_DESC;
+
+def FFINT_U_W : FFINT_U_W_ENC, FFINT_U_W_DESC;
+def FFINT_U_D : FFINT_U_D_ENC, FFINT_U_D_DESC;
+
+def FFQL_W : FFQL_W_ENC, FFQL_W_DESC;
+def FFQL_D : FFQL_D_ENC, FFQL_D_DESC;
+
+def FFQR_W : FFQR_W_ENC, FFQR_W_DESC;
+def FFQR_D : FFQR_D_ENC, FFQR_D_DESC;
+
+def FILL_B : FILL_B_ENC, FILL_B_DESC;
+def FILL_H : FILL_H_ENC, FILL_H_DESC;
+def FILL_W : FILL_W_ENC, FILL_W_DESC;
+def FILL_FW_PSEUDO : FILL_FW_PSEUDO_DESC;
+def FILL_FD_PSEUDO : FILL_FD_PSEUDO_DESC;
+
+def FLOG2_W : FLOG2_W_ENC, FLOG2_W_DESC;
+def FLOG2_D : FLOG2_D_ENC, FLOG2_D_DESC;
+
+def FMADD_W : FMADD_W_ENC, FMADD_W_DESC;
+def FMADD_D : FMADD_D_ENC, FMADD_D_DESC;
+
+def FMAX_W : FMAX_W_ENC, FMAX_W_DESC;
+def FMAX_D : FMAX_D_ENC, FMAX_D_DESC;
+
+def FMAX_A_W : FMAX_A_W_ENC, FMAX_A_W_DESC;
+def FMAX_A_D : FMAX_A_D_ENC, FMAX_A_D_DESC;
+
+def FMIN_W : FMIN_W_ENC, FMIN_W_DESC;
+def FMIN_D : FMIN_D_ENC, FMIN_D_DESC;
+
+def FMIN_A_W : FMIN_A_W_ENC, FMIN_A_W_DESC;
+def FMIN_A_D : FMIN_A_D_ENC, FMIN_A_D_DESC;
+
+def FMSUB_W : FMSUB_W_ENC, FMSUB_W_DESC;
+def FMSUB_D : FMSUB_D_ENC, FMSUB_D_DESC;
+
+def FMUL_W : FMUL_W_ENC, FMUL_W_DESC;
+def FMUL_D : FMUL_D_ENC, FMUL_D_DESC;
+
+def FRINT_W : FRINT_W_ENC, FRINT_W_DESC;
+def FRINT_D : FRINT_D_ENC, FRINT_D_DESC;
+
+def FRCP_W : FRCP_W_ENC, FRCP_W_DESC;
+def FRCP_D : FRCP_D_ENC, FRCP_D_DESC;
+
+def FRSQRT_W : FRSQRT_W_ENC, FRSQRT_W_DESC;
+def FRSQRT_D : FRSQRT_D_ENC, FRSQRT_D_DESC;
+
+def FSAF_W : FSAF_W_ENC, FSAF_W_DESC;
+def FSAF_D : FSAF_D_ENC, FSAF_D_DESC;
+
+def FSEQ_W : FSEQ_W_ENC, FSEQ_W_DESC;
+def FSEQ_D : FSEQ_D_ENC, FSEQ_D_DESC;
+
+def FSLE_W : FSLE_W_ENC, FSLE_W_DESC;
+def FSLE_D : FSLE_D_ENC, FSLE_D_DESC;
+
+def FSLT_W : FSLT_W_ENC, FSLT_W_DESC;
+def FSLT_D : FSLT_D_ENC, FSLT_D_DESC;
+
+def FSNE_W : FSNE_W_ENC, FSNE_W_DESC;
+def FSNE_D : FSNE_D_ENC, FSNE_D_DESC;
+
+def FSOR_W : FSOR_W_ENC, FSOR_W_DESC;
+def FSOR_D : FSOR_D_ENC, FSOR_D_DESC;
+
+def FSQRT_W : FSQRT_W_ENC, FSQRT_W_DESC;
+def FSQRT_D : FSQRT_D_ENC, FSQRT_D_DESC;
+
+def FSUB_W : FSUB_W_ENC, FSUB_W_DESC;
+def FSUB_D : FSUB_D_ENC, FSUB_D_DESC;
+
+def FSUEQ_W : FSUEQ_W_ENC, FSUEQ_W_DESC;
+def FSUEQ_D : FSUEQ_D_ENC, FSUEQ_D_DESC;
+
+def FSULE_W : FSULE_W_ENC, FSULE_W_DESC;
+def FSULE_D : FSULE_D_ENC, FSULE_D_DESC;
+
+def FSULT_W : FSULT_W_ENC, FSULT_W_DESC;
+def FSULT_D : FSULT_D_ENC, FSULT_D_DESC;
+
+def FSUN_W : FSUN_W_ENC, FSUN_W_DESC;
+def FSUN_D : FSUN_D_ENC, FSUN_D_DESC;
+
+def FSUNE_W : FSUNE_W_ENC, FSUNE_W_DESC;
+def FSUNE_D : FSUNE_D_ENC, FSUNE_D_DESC;
+
+def FTINT_S_W : FTINT_S_W_ENC, FTINT_S_W_DESC;
+def FTINT_S_D : FTINT_S_D_ENC, FTINT_S_D_DESC;
+
+def FTINT_U_W : FTINT_U_W_ENC, FTINT_U_W_DESC;
+def FTINT_U_D : FTINT_U_D_ENC, FTINT_U_D_DESC;
+
+def FTQ_H : FTQ_H_ENC, FTQ_H_DESC;
+def FTQ_W : FTQ_W_ENC, FTQ_W_DESC;
+
+def FTRUNC_S_W : FTRUNC_S_W_ENC, FTRUNC_S_W_DESC;
+def FTRUNC_S_D : FTRUNC_S_D_ENC, FTRUNC_S_D_DESC;
+
+def FTRUNC_U_W : FTRUNC_U_W_ENC, FTRUNC_U_W_DESC;
+def FTRUNC_U_D : FTRUNC_U_D_ENC, FTRUNC_U_D_DESC;
+
+def HADD_S_H : HADD_S_H_ENC, HADD_S_H_DESC;
+def HADD_S_W : HADD_S_W_ENC, HADD_S_W_DESC;
+def HADD_S_D : HADD_S_D_ENC, HADD_S_D_DESC;
+
+def HADD_U_H : HADD_U_H_ENC, HADD_U_H_DESC;
+def HADD_U_W : HADD_U_W_ENC, HADD_U_W_DESC;
+def HADD_U_D : HADD_U_D_ENC, HADD_U_D_DESC;
+
+def HSUB_S_H : HSUB_S_H_ENC, HSUB_S_H_DESC;
+def HSUB_S_W : HSUB_S_W_ENC, HSUB_S_W_DESC;
+def HSUB_S_D : HSUB_S_D_ENC, HSUB_S_D_DESC;
+
+def HSUB_U_H : HSUB_U_H_ENC, HSUB_U_H_DESC;
+def HSUB_U_W : HSUB_U_W_ENC, HSUB_U_W_DESC;
+def HSUB_U_D : HSUB_U_D_ENC, HSUB_U_D_DESC;
+
+def ILVEV_B : ILVEV_B_ENC, ILVEV_B_DESC;
+def ILVEV_H : ILVEV_H_ENC, ILVEV_H_DESC;
+def ILVEV_W : ILVEV_W_ENC, ILVEV_W_DESC;
+def ILVEV_D : ILVEV_D_ENC, ILVEV_D_DESC;
+
+def ILVL_B : ILVL_B_ENC, ILVL_B_DESC;
+def ILVL_H : ILVL_H_ENC, ILVL_H_DESC;
+def ILVL_W : ILVL_W_ENC, ILVL_W_DESC;
+def ILVL_D : ILVL_D_ENC, ILVL_D_DESC;
+
+def ILVOD_B : ILVOD_B_ENC, ILVOD_B_DESC;
+def ILVOD_H : ILVOD_H_ENC, ILVOD_H_DESC;
+def ILVOD_W : ILVOD_W_ENC, ILVOD_W_DESC;
+def ILVOD_D : ILVOD_D_ENC, ILVOD_D_DESC;
+
+def ILVR_B : ILVR_B_ENC, ILVR_B_DESC;
+def ILVR_H : ILVR_H_ENC, ILVR_H_DESC;
+def ILVR_W : ILVR_W_ENC, ILVR_W_DESC;
+def ILVR_D : ILVR_D_ENC, ILVR_D_DESC;
+
+def INSERT_B : INSERT_B_ENC, INSERT_B_DESC;
+def INSERT_H : INSERT_H_ENC, INSERT_H_DESC;
+def INSERT_W : INSERT_W_ENC, INSERT_W_DESC;
+
+// INSERT_FW_PSEUDO defined after INSVE_W
+// INSERT_FD_PSEUDO defined after INSVE_D
+
+def INSVE_B : INSVE_B_ENC, INSVE_B_DESC;
+def INSVE_H : INSVE_H_ENC, INSVE_H_DESC;
+def INSVE_W : INSVE_W_ENC, INSVE_W_DESC;
+def INSVE_D : INSVE_D_ENC, INSVE_D_DESC;
+
+def INSERT_FW_PSEUDO : INSERT_FW_PSEUDO_DESC;
+def INSERT_FD_PSEUDO : INSERT_FD_PSEUDO_DESC;
+
+def LD_B : LD_B_ENC, LD_B_DESC;
+def LD_H : LD_H_ENC, LD_H_DESC;
+def LD_W : LD_W_ENC, LD_W_DESC;
+def LD_D : LD_D_ENC, LD_D_DESC;
+
+def LDI_B : LDI_B_ENC, LDI_B_DESC;
+def LDI_H : LDI_H_ENC, LDI_H_DESC;
+def LDI_W : LDI_W_ENC, LDI_W_DESC;
+def LDI_D : LDI_D_ENC, LDI_D_DESC;
+
+def LSA : LSA_ENC, LSA_DESC;
+
+def MADD_Q_H : MADD_Q_H_ENC, MADD_Q_H_DESC;
+def MADD_Q_W : MADD_Q_W_ENC, MADD_Q_W_DESC;
+
+def MADDR_Q_H : MADDR_Q_H_ENC, MADDR_Q_H_DESC;
+def MADDR_Q_W : MADDR_Q_W_ENC, MADDR_Q_W_DESC;
+
+def MADDV_B : MADDV_B_ENC, MADDV_B_DESC;
+def MADDV_H : MADDV_H_ENC, MADDV_H_DESC;
+def MADDV_W : MADDV_W_ENC, MADDV_W_DESC;
+def MADDV_D : MADDV_D_ENC, MADDV_D_DESC;
+
+def MAX_A_B : MAX_A_B_ENC, MAX_A_B_DESC;
+def MAX_A_H : MAX_A_H_ENC, MAX_A_H_DESC;
+def MAX_A_W : MAX_A_W_ENC, MAX_A_W_DESC;
+def MAX_A_D : MAX_A_D_ENC, MAX_A_D_DESC;
+
+def MAX_S_B : MAX_S_B_ENC, MAX_S_B_DESC;
+def MAX_S_H : MAX_S_H_ENC, MAX_S_H_DESC;
+def MAX_S_W : MAX_S_W_ENC, MAX_S_W_DESC;
+def MAX_S_D : MAX_S_D_ENC, MAX_S_D_DESC;
+
+def MAX_U_B : MAX_U_B_ENC, MAX_U_B_DESC;
+def MAX_U_H : MAX_U_H_ENC, MAX_U_H_DESC;
+def MAX_U_W : MAX_U_W_ENC, MAX_U_W_DESC;
+def MAX_U_D : MAX_U_D_ENC, MAX_U_D_DESC;
+
+def MAXI_S_B : MAXI_S_B_ENC, MAXI_S_B_DESC;
+def MAXI_S_H : MAXI_S_H_ENC, MAXI_S_H_DESC;
+def MAXI_S_W : MAXI_S_W_ENC, MAXI_S_W_DESC;
+def MAXI_S_D : MAXI_S_D_ENC, MAXI_S_D_DESC;
+
+def MAXI_U_B : MAXI_U_B_ENC, MAXI_U_B_DESC;
+def MAXI_U_H : MAXI_U_H_ENC, MAXI_U_H_DESC;
+def MAXI_U_W : MAXI_U_W_ENC, MAXI_U_W_DESC;
+def MAXI_U_D : MAXI_U_D_ENC, MAXI_U_D_DESC;
+
+def MIN_A_B : MIN_A_B_ENC, MIN_A_B_DESC;
+def MIN_A_H : MIN_A_H_ENC, MIN_A_H_DESC;
+def MIN_A_W : MIN_A_W_ENC, MIN_A_W_DESC;
+def MIN_A_D : MIN_A_D_ENC, MIN_A_D_DESC;
+
+def MIN_S_B : MIN_S_B_ENC, MIN_S_B_DESC;
+def MIN_S_H : MIN_S_H_ENC, MIN_S_H_DESC;
+def MIN_S_W : MIN_S_W_ENC, MIN_S_W_DESC;
+def MIN_S_D : MIN_S_D_ENC, MIN_S_D_DESC;
+
+def MIN_U_B : MIN_U_B_ENC, MIN_U_B_DESC;
+def MIN_U_H : MIN_U_H_ENC, MIN_U_H_DESC;
+def MIN_U_W : MIN_U_W_ENC, MIN_U_W_DESC;
+def MIN_U_D : MIN_U_D_ENC, MIN_U_D_DESC;
+
+def MINI_S_B : MINI_S_B_ENC, MINI_S_B_DESC;
+def MINI_S_H : MINI_S_H_ENC, MINI_S_H_DESC;
+def MINI_S_W : MINI_S_W_ENC, MINI_S_W_DESC;
+def MINI_S_D : MINI_S_D_ENC, MINI_S_D_DESC;
+
+def MINI_U_B : MINI_U_B_ENC, MINI_U_B_DESC;
+def MINI_U_H : MINI_U_H_ENC, MINI_U_H_DESC;
+def MINI_U_W : MINI_U_W_ENC, MINI_U_W_DESC;
+def MINI_U_D : MINI_U_D_ENC, MINI_U_D_DESC;
+
+def MOD_S_B : MOD_S_B_ENC, MOD_S_B_DESC;
+def MOD_S_H : MOD_S_H_ENC, MOD_S_H_DESC;
+def MOD_S_W : MOD_S_W_ENC, MOD_S_W_DESC;
+def MOD_S_D : MOD_S_D_ENC, MOD_S_D_DESC;
+
+def MOD_U_B : MOD_U_B_ENC, MOD_U_B_DESC;
+def MOD_U_H : MOD_U_H_ENC, MOD_U_H_DESC;
+def MOD_U_W : MOD_U_W_ENC, MOD_U_W_DESC;
+def MOD_U_D : MOD_U_D_ENC, MOD_U_D_DESC;
+
+def MOVE_V : MOVE_V_ENC, MOVE_V_DESC;
+
+def MSUB_Q_H : MSUB_Q_H_ENC, MSUB_Q_H_DESC;
+def MSUB_Q_W : MSUB_Q_W_ENC, MSUB_Q_W_DESC;
+
+def MSUBR_Q_H : MSUBR_Q_H_ENC, MSUBR_Q_H_DESC;
+def MSUBR_Q_W : MSUBR_Q_W_ENC, MSUBR_Q_W_DESC;
+
+def MSUBV_B : MSUBV_B_ENC, MSUBV_B_DESC;
+def MSUBV_H : MSUBV_H_ENC, MSUBV_H_DESC;
+def MSUBV_W : MSUBV_W_ENC, MSUBV_W_DESC;
+def MSUBV_D : MSUBV_D_ENC, MSUBV_D_DESC;
+
+def MUL_Q_H : MUL_Q_H_ENC, MUL_Q_H_DESC;
+def MUL_Q_W : MUL_Q_W_ENC, MUL_Q_W_DESC;
+
+def MULR_Q_H : MULR_Q_H_ENC, MULR_Q_H_DESC;
+def MULR_Q_W : MULR_Q_W_ENC, MULR_Q_W_DESC;
+
+def MULV_B : MULV_B_ENC, MULV_B_DESC;
+def MULV_H : MULV_H_ENC, MULV_H_DESC;
+def MULV_W : MULV_W_ENC, MULV_W_DESC;
+def MULV_D : MULV_D_ENC, MULV_D_DESC;
+
+def NLOC_B : NLOC_B_ENC, NLOC_B_DESC;
+def NLOC_H : NLOC_H_ENC, NLOC_H_DESC;
+def NLOC_W : NLOC_W_ENC, NLOC_W_DESC;
+def NLOC_D : NLOC_D_ENC, NLOC_D_DESC;
+
+def NLZC_B : NLZC_B_ENC, NLZC_B_DESC;
+def NLZC_H : NLZC_H_ENC, NLZC_H_DESC;
+def NLZC_W : NLZC_W_ENC, NLZC_W_DESC;
+def NLZC_D : NLZC_D_ENC, NLZC_D_DESC;
+
+def NOR_V : NOR_V_ENC, NOR_V_DESC;
+def NOR_V_H_PSEUDO : NOR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def NOR_V_W_PSEUDO : NOR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def NOR_V_D_PSEUDO : NOR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(NOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def NORI_B : NORI_B_ENC, NORI_B_DESC;
+
+def OR_V : OR_V_ENC, OR_V_DESC;
+def OR_V_H_PSEUDO : OR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def OR_V_W_PSEUDO : OR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def OR_V_D_PSEUDO : OR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(OR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def ORI_B : ORI_B_ENC, ORI_B_DESC;
+
+def PCKEV_B : PCKEV_B_ENC, PCKEV_B_DESC;
+def PCKEV_H : PCKEV_H_ENC, PCKEV_H_DESC;
+def PCKEV_W : PCKEV_W_ENC, PCKEV_W_DESC;
+def PCKEV_D : PCKEV_D_ENC, PCKEV_D_DESC;
+
+def PCKOD_B : PCKOD_B_ENC, PCKOD_B_DESC;
+def PCKOD_H : PCKOD_H_ENC, PCKOD_H_DESC;
+def PCKOD_W : PCKOD_W_ENC, PCKOD_W_DESC;
+def PCKOD_D : PCKOD_D_ENC, PCKOD_D_DESC;
+
+def PCNT_B : PCNT_B_ENC, PCNT_B_DESC;
+def PCNT_H : PCNT_H_ENC, PCNT_H_DESC;
+def PCNT_W : PCNT_W_ENC, PCNT_W_DESC;
+def PCNT_D : PCNT_D_ENC, PCNT_D_DESC;
+
+def SAT_S_B : SAT_S_B_ENC, SAT_S_B_DESC;
+def SAT_S_H : SAT_S_H_ENC, SAT_S_H_DESC;
+def SAT_S_W : SAT_S_W_ENC, SAT_S_W_DESC;
+def SAT_S_D : SAT_S_D_ENC, SAT_S_D_DESC;
+
+def SAT_U_B : SAT_U_B_ENC, SAT_U_B_DESC;
+def SAT_U_H : SAT_U_H_ENC, SAT_U_H_DESC;
+def SAT_U_W : SAT_U_W_ENC, SAT_U_W_DESC;
+def SAT_U_D : SAT_U_D_ENC, SAT_U_D_DESC;
+
+def SHF_B : SHF_B_ENC, SHF_B_DESC;
+def SHF_H : SHF_H_ENC, SHF_H_DESC;
+def SHF_W : SHF_W_ENC, SHF_W_DESC;
+
+def SLD_B : SLD_B_ENC, SLD_B_DESC;
+def SLD_H : SLD_H_ENC, SLD_H_DESC;
+def SLD_W : SLD_W_ENC, SLD_W_DESC;
+def SLD_D : SLD_D_ENC, SLD_D_DESC;
+
+def SLDI_B : SLDI_B_ENC, SLDI_B_DESC;
+def SLDI_H : SLDI_H_ENC, SLDI_H_DESC;
+def SLDI_W : SLDI_W_ENC, SLDI_W_DESC;
+def SLDI_D : SLDI_D_ENC, SLDI_D_DESC;
+
+def SLL_B : SLL_B_ENC, SLL_B_DESC;
+def SLL_H : SLL_H_ENC, SLL_H_DESC;
+def SLL_W : SLL_W_ENC, SLL_W_DESC;
+def SLL_D : SLL_D_ENC, SLL_D_DESC;
+
+def SLLI_B : SLLI_B_ENC, SLLI_B_DESC;
+def SLLI_H : SLLI_H_ENC, SLLI_H_DESC;
+def SLLI_W : SLLI_W_ENC, SLLI_W_DESC;
+def SLLI_D : SLLI_D_ENC, SLLI_D_DESC;
+
+def SPLAT_B : SPLAT_B_ENC, SPLAT_B_DESC;
+def SPLAT_H : SPLAT_H_ENC, SPLAT_H_DESC;
+def SPLAT_W : SPLAT_W_ENC, SPLAT_W_DESC;
+def SPLAT_D : SPLAT_D_ENC, SPLAT_D_DESC;
+
+def SPLATI_B : SPLATI_B_ENC, SPLATI_B_DESC;
+def SPLATI_H : SPLATI_H_ENC, SPLATI_H_DESC;
+def SPLATI_W : SPLATI_W_ENC, SPLATI_W_DESC;
+def SPLATI_D : SPLATI_D_ENC, SPLATI_D_DESC;
+
+def SRA_B : SRA_B_ENC, SRA_B_DESC;
+def SRA_H : SRA_H_ENC, SRA_H_DESC;
+def SRA_W : SRA_W_ENC, SRA_W_DESC;
+def SRA_D : SRA_D_ENC, SRA_D_DESC;
+
+def SRAI_B : SRAI_B_ENC, SRAI_B_DESC;
+def SRAI_H : SRAI_H_ENC, SRAI_H_DESC;
+def SRAI_W : SRAI_W_ENC, SRAI_W_DESC;
+def SRAI_D : SRAI_D_ENC, SRAI_D_DESC;
+
+def SRAR_B : SRAR_B_ENC, SRAR_B_DESC;
+def SRAR_H : SRAR_H_ENC, SRAR_H_DESC;
+def SRAR_W : SRAR_W_ENC, SRAR_W_DESC;
+def SRAR_D : SRAR_D_ENC, SRAR_D_DESC;
+
+def SRARI_B : SRARI_B_ENC, SRARI_B_DESC;
+def SRARI_H : SRARI_H_ENC, SRARI_H_DESC;
+def SRARI_W : SRARI_W_ENC, SRARI_W_DESC;
+def SRARI_D : SRARI_D_ENC, SRARI_D_DESC;
+
+def SRL_B : SRL_B_ENC, SRL_B_DESC;
+def SRL_H : SRL_H_ENC, SRL_H_DESC;
+def SRL_W : SRL_W_ENC, SRL_W_DESC;
+def SRL_D : SRL_D_ENC, SRL_D_DESC;
+
+def SRLI_B : SRLI_B_ENC, SRLI_B_DESC;
+def SRLI_H : SRLI_H_ENC, SRLI_H_DESC;
+def SRLI_W : SRLI_W_ENC, SRLI_W_DESC;
+def SRLI_D : SRLI_D_ENC, SRLI_D_DESC;
+
+def SRLR_B : SRLR_B_ENC, SRLR_B_DESC;
+def SRLR_H : SRLR_H_ENC, SRLR_H_DESC;
+def SRLR_W : SRLR_W_ENC, SRLR_W_DESC;
+def SRLR_D : SRLR_D_ENC, SRLR_D_DESC;
+
+def SRLRI_B : SRLRI_B_ENC, SRLRI_B_DESC;
+def SRLRI_H : SRLRI_H_ENC, SRLRI_H_DESC;
+def SRLRI_W : SRLRI_W_ENC, SRLRI_W_DESC;
+def SRLRI_D : SRLRI_D_ENC, SRLRI_D_DESC;
+
+def ST_B : ST_B_ENC, ST_B_DESC;
+def ST_H : ST_H_ENC, ST_H_DESC;
+def ST_W : ST_W_ENC, ST_W_DESC;
+def ST_D : ST_D_ENC, ST_D_DESC;
+
+def SUBS_S_B : SUBS_S_B_ENC, SUBS_S_B_DESC;
+def SUBS_S_H : SUBS_S_H_ENC, SUBS_S_H_DESC;
+def SUBS_S_W : SUBS_S_W_ENC, SUBS_S_W_DESC;
+def SUBS_S_D : SUBS_S_D_ENC, SUBS_S_D_DESC;
+
+def SUBS_U_B : SUBS_U_B_ENC, SUBS_U_B_DESC;
+def SUBS_U_H : SUBS_U_H_ENC, SUBS_U_H_DESC;
+def SUBS_U_W : SUBS_U_W_ENC, SUBS_U_W_DESC;
+def SUBS_U_D : SUBS_U_D_ENC, SUBS_U_D_DESC;
+
+def SUBSUS_U_B : SUBSUS_U_B_ENC, SUBSUS_U_B_DESC;
+def SUBSUS_U_H : SUBSUS_U_H_ENC, SUBSUS_U_H_DESC;
+def SUBSUS_U_W : SUBSUS_U_W_ENC, SUBSUS_U_W_DESC;
+def SUBSUS_U_D : SUBSUS_U_D_ENC, SUBSUS_U_D_DESC;
+
+def SUBSUU_S_B : SUBSUU_S_B_ENC, SUBSUU_S_B_DESC;
+def SUBSUU_S_H : SUBSUU_S_H_ENC, SUBSUU_S_H_DESC;
+def SUBSUU_S_W : SUBSUU_S_W_ENC, SUBSUU_S_W_DESC;
+def SUBSUU_S_D : SUBSUU_S_D_ENC, SUBSUU_S_D_DESC;
+
+def SUBV_B : SUBV_B_ENC, SUBV_B_DESC;
+def SUBV_H : SUBV_H_ENC, SUBV_H_DESC;
+def SUBV_W : SUBV_W_ENC, SUBV_W_DESC;
+def SUBV_D : SUBV_D_ENC, SUBV_D_DESC;
+
+def SUBVI_B : SUBVI_B_ENC, SUBVI_B_DESC;
+def SUBVI_H : SUBVI_H_ENC, SUBVI_H_DESC;
+def SUBVI_W : SUBVI_W_ENC, SUBVI_W_DESC;
+def SUBVI_D : SUBVI_D_ENC, SUBVI_D_DESC;
+
+def VSHF_B : VSHF_B_ENC, VSHF_B_DESC;
+def VSHF_H : VSHF_H_ENC, VSHF_H_DESC;
+def VSHF_W : VSHF_W_ENC, VSHF_W_DESC;
+def VSHF_D : VSHF_D_ENC, VSHF_D_DESC;
+
+def XOR_V : XOR_V_ENC, XOR_V_DESC;
+def XOR_V_H_PSEUDO : XOR_V_H_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def XOR_V_W_PSEUDO : XOR_V_W_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+def XOR_V_D_PSEUDO : XOR_V_D_PSEUDO_DESC,
+ PseudoInstExpansion<(XOR_V MSA128BOpnd:$wd,
+ MSA128BOpnd:$ws,
+ MSA128BOpnd:$wt)>;
+
+def XORI_B : XORI_B_ENC, XORI_B_DESC;
+
+// Patterns.
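+// MSAPat wraps Pat with a predicate list (HasMSA by default) so that the
+// patterns below are only used when MSA is available.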
+class MSAPat<dag pattern, dag result, list<Predicate> pred = [HasMSA]> :
+ Pat<pattern, result>, Requires<pred>;
+
+def : MSAPat<(extractelt (v4i32 MSA128W:$ws), immZExt4:$idx),
+ (COPY_S_W MSA128W:$ws, immZExt4:$idx)>;
+
+def : MSAPat<(v16i8 (load addr:$addr)), (LD_B addr:$addr)>;
+def : MSAPat<(v8i16 (load addr:$addr)), (LD_H addr:$addr)>;
+def : MSAPat<(v4i32 (load addr:$addr)), (LD_W addr:$addr)>;
+def : MSAPat<(v2i64 (load addr:$addr)), (LD_D addr:$addr)>;
+def : MSAPat<(v8f16 (load addr:$addr)), (LD_H addr:$addr)>;
+def : MSAPat<(v4f32 (load addr:$addr)), (LD_W addr:$addr)>;
+def : MSAPat<(v2f64 (load addr:$addr)), (LD_D addr:$addr)>;
+
+def : MSAPat<(v8f16 (load addrRegImm:$addr)), (LD_H addrRegImm:$addr)>;
+def : MSAPat<(v4f32 (load addrRegImm:$addr)), (LD_W addrRegImm:$addr)>;
+def : MSAPat<(v2f64 (load addrRegImm:$addr)), (LD_D addrRegImm:$addr)>;
+
+def : MSAPat<(store (v16i8 MSA128B:$ws), addr:$addr),
+ (ST_B MSA128B:$ws, addr:$addr)>;
+def : MSAPat<(store (v8i16 MSA128H:$ws), addr:$addr),
+ (ST_H MSA128H:$ws, addr:$addr)>;
+def : MSAPat<(store (v4i32 MSA128W:$ws), addr:$addr),
+ (ST_W MSA128W:$ws, addr:$addr)>;
+def : MSAPat<(store (v2i64 MSA128D:$ws), addr:$addr),
+ (ST_D MSA128D:$ws, addr:$addr)>;
+def : MSAPat<(store (v8f16 MSA128H:$ws), addr:$addr),
+ (ST_H MSA128H:$ws, addr:$addr)>;
+def : MSAPat<(store (v4f32 MSA128W:$ws), addr:$addr),
+ (ST_W MSA128W:$ws, addr:$addr)>;
+def : MSAPat<(store (v2f64 MSA128D:$ws), addr:$addr),
+ (ST_D MSA128D:$ws, addr:$addr)>;
+
+def ST_FH : MSAPat<(store (v8f16 MSA128H:$ws), addrRegImm:$addr),
+ (ST_H MSA128H:$ws, addrRegImm:$addr)>;
+def ST_FW : MSAPat<(store (v4f32 MSA128W:$ws), addrRegImm:$addr),
+ (ST_W MSA128W:$ws, addrRegImm:$addr)>;
+def ST_FD : MSAPat<(store (v2f64 MSA128D:$ws), addrRegImm:$addr),
+ (ST_D MSA128D:$ws, addrRegImm:$addr)>;
+
+class MSA_FABS_PSEUDO_DESC_BASE<RegisterOperand ROWD,
+ RegisterOperand ROWS = ROWD,
+ InstrItinClass itin = NoItinerary> :
+ MipsPseudo<(outs ROWD:$wd),
+ (ins ROWS:$ws),
+ [(set ROWD:$wd, (fabs ROWS:$ws))]> {
+ InstrItinClass Itinerary = itin;
+}
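+// fabs is selected via the pseudos below, which expand to fmax_a wd, ws, ws;
+// fmax_a takes the maximum of the absolute values, and max(|ws|, |ws|) = |ws|.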
+def FABS_W : MSA_FABS_PSEUDO_DESC_BASE<MSA128WOpnd>,
+ PseudoInstExpansion<(FMAX_A_W MSA128WOpnd:$wd, MSA128WOpnd:$ws,
+ MSA128WOpnd:$ws)>;
+def FABS_D : MSA_FABS_PSEUDO_DESC_BASE<MSA128DOpnd>,
+ PseudoInstExpansion<(FMAX_A_D MSA128DOpnd:$wd, MSA128DOpnd:$ws,
+ MSA128DOpnd:$ws)>;
+
+class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, list<Predicate> preds = [HasMSA]> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
+
+// These are endian-independent because the element size doesn't change
+def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
+def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
+def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
+def : MSABitconvertPat<v8f16, v8i16, MSA128H>;
+def : MSABitconvertPat<v4f32, v4i32, MSA128W>;
+def : MSABitconvertPat<v2f64, v2i64, MSA128D>;
+
+// Little endian bitcasts are always no-ops
+def : MSABitconvertPat<v16i8, v8i16, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v4i32, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v2i64, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v8f16, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v4f32, MSA128B, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v16i8, v2f64, MSA128B, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v8i16, v16i8, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v4i32, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v2i64, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v4f32, MSA128H, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v8i16, v2f64, MSA128H, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v4i32, v16i8, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v8i16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v2i64, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v8f16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4i32, v2f64, MSA128W, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v2i64, v16i8, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v8i16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v4i32, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v8f16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2i64, v4f32, MSA128D, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v4f32, v16i8, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v8i16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v2i64, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v8f16, MSA128W, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v4f32, v2f64, MSA128W, [HasMSA, IsLE]>;
+
+def : MSABitconvertPat<v2f64, v16i8, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v8i16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v4i32, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v8f16, MSA128D, [HasMSA, IsLE]>;
+def : MSABitconvertPat<v2f64, v4f32, MSA128D, [HasMSA, IsLE]>;
+
+// Big endian bitcasts expand to shuffle instructions.
+// This is because bitcast is defined to be a store/load sequence and the
+// vector store/load instructions are mixed-endian with respect to the vector
+// as a whole (little endian with respect to element order, but big endian
+// elements).
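+// The SHF immediates used below encode the permutation in four 2-bit fields:
+// 27 (0b00011011) selects elements [3,2,1,0] within each group of four, and
+// 177 (0b10110001) selects [1,0,3,2], i.e. it swaps each adjacent pair.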
+
+class MSABitconvertReverseQuartersPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, MSAInst Insn,
+ RegisterClass ViaRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS (Insn (COPY_TO_REGCLASS SrcVT:$src, ViaRC), 27),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseHalvesPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC, MSAInst Insn,
+ RegisterClass ViaRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS (Insn (COPY_TO_REGCLASS SrcVT:$src, ViaRC), 177),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseBInHPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_B, MSA128B>;
+
+class MSABitconvertReverseBInWPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseQuartersPat<DstVT, SrcVT, DstRC, SHF_B, MSA128B>;
+
+class MSABitconvertReverseBInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSAPat<(DstVT (bitconvert SrcVT:$src)),
+ (COPY_TO_REGCLASS
+ (SHF_W
+ (COPY_TO_REGCLASS
+ (SHF_B (COPY_TO_REGCLASS SrcVT:$src, MSA128B), 27),
+ MSA128W), 177),
+ DstRC),
+ [HasMSA, IsBE]>;
+
+class MSABitconvertReverseHInWPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_H, MSA128H>;
+
+class MSABitconvertReverseHInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseQuartersPat<DstVT, SrcVT, DstRC, SHF_H, MSA128H>;
+
+class MSABitconvertReverseWInDPat<ValueType DstVT, ValueType SrcVT,
+ RegisterClass DstRC> :
+ MSABitconvertReverseHalvesPat<DstVT, SrcVT, DstRC, SHF_W, MSA128W>;
+
+def : MSABitconvertReverseBInHPat<v8i16, v16i8, MSA128H>;
+def : MSABitconvertReverseBInHPat<v8f16, v16i8, MSA128H>;
+def : MSABitconvertReverseBInWPat<v4i32, v16i8, MSA128W>;
+def : MSABitconvertReverseBInWPat<v4f32, v16i8, MSA128W>;
+def : MSABitconvertReverseBInDPat<v2i64, v16i8, MSA128D>;
+def : MSABitconvertReverseBInDPat<v2f64, v16i8, MSA128D>;
+
+def : MSABitconvertReverseBInHPat<v16i8, v8i16, MSA128B>;
+def : MSABitconvertReverseHInWPat<v4i32, v8i16, MSA128W>;
+def : MSABitconvertReverseHInWPat<v4f32, v8i16, MSA128W>;
+def : MSABitconvertReverseHInDPat<v2i64, v8i16, MSA128D>;
+def : MSABitconvertReverseHInDPat<v2f64, v8i16, MSA128D>;
+
+def : MSABitconvertReverseBInHPat<v16i8, v8f16, MSA128B>;
+def : MSABitconvertReverseHInWPat<v4i32, v8f16, MSA128W>;
+def : MSABitconvertReverseHInWPat<v4f32, v8f16, MSA128W>;
+def : MSABitconvertReverseHInDPat<v2i64, v8f16, MSA128D>;
+def : MSABitconvertReverseHInDPat<v2f64, v8f16, MSA128D>;
+
+def : MSABitconvertReverseBInWPat<v16i8, v4i32, MSA128B>;
+def : MSABitconvertReverseHInWPat<v8i16, v4i32, MSA128H>;
+def : MSABitconvertReverseHInWPat<v8f16, v4i32, MSA128H>;
+def : MSABitconvertReverseWInDPat<v2i64, v4i32, MSA128D>;
+def : MSABitconvertReverseWInDPat<v2f64, v4i32, MSA128D>;
+
+def : MSABitconvertReverseBInWPat<v16i8, v4f32, MSA128B>;
+def : MSABitconvertReverseHInWPat<v8i16, v4f32, MSA128H>;
+def : MSABitconvertReverseHInWPat<v8f16, v4f32, MSA128H>;
+def : MSABitconvertReverseWInDPat<v2i64, v4f32, MSA128D>;
+def : MSABitconvertReverseWInDPat<v2f64, v4f32, MSA128D>;
+
+def : MSABitconvertReverseBInDPat<v16i8, v2i64, MSA128B>;
+def : MSABitconvertReverseHInDPat<v8i16, v2i64, MSA128H>;
+def : MSABitconvertReverseHInDPat<v8f16, v2i64, MSA128H>;
+def : MSABitconvertReverseWInDPat<v4i32, v2i64, MSA128W>;
+def : MSABitconvertReverseWInDPat<v4f32, v2i64, MSA128W>;
+
+def : MSABitconvertReverseBInDPat<v16i8, v2f64, MSA128B>;
+def : MSABitconvertReverseHInDPat<v8i16, v2f64, MSA128H>;
+def : MSABitconvertReverseHInDPat<v8f16, v2f64, MSA128H>;
+def : MSABitconvertReverseWInDPat<v4i32, v2f64, MSA128W>;
+def : MSABitconvertReverseWInDPat<v4f32, v2f64, MSA128W>;
+
+// Pseudos used to implement BNZ.df and BZ.df
+
+class MSA_CBRANCH_PSEUDO_DESC_BASE<SDPatternOperator OpNode, ValueType TyNode,
+ RegisterClass RCWS,
+ InstrItinClass itin = NoItinerary> :
+ MipsPseudo<(outs GPR32:$dst),
+ (ins RCWS:$ws),
+ [(set GPR32:$dst, (OpNode (TyNode RCWS:$ws)))]> {
+ bit usesCustomInserter = 1;
+}
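+// usesCustomInserter means these pseudos are expanded after instruction
+// selection, presumably into a BNZ.df/BZ.df branch sequence that materializes
+// the boolean result in $dst.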
+
+def SNZ_B_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v16i8,
+ MSA128B, NoItinerary>;
+def SNZ_H_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v8i16,
+ MSA128H, NoItinerary>;
+def SNZ_W_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v4i32,
+ MSA128W, NoItinerary>;
+def SNZ_D_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllNonZero, v2i64,
+ MSA128D, NoItinerary>;
+def SNZ_V_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAnyNonZero, v16i8,
+ MSA128B, NoItinerary>;
+
+def SZ_B_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v16i8,
+ MSA128B, NoItinerary>;
+def SZ_H_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v8i16,
+ MSA128H, NoItinerary>;
+def SZ_W_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v4i32,
+ MSA128W, NoItinerary>;
+def SZ_D_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAllZero, v2i64,
+ MSA128D, NoItinerary>;
+def SZ_V_PSEUDO : MSA_CBRANCH_PSEUDO_DESC_BASE<MipsVAnyZero, v16i8,
+ MSA128B, NoItinerary>;
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index 59b23f7ad7c1..dedf802f80ac 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -15,6 +15,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -22,6 +23,53 @@ static cl::opt<bool>
FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true),
cl::desc("Always use $gp as the global base register."));
+// class MipsCallEntry.
+MipsCallEntry::MipsCallEntry(const StringRef &N) {
+#ifndef NDEBUG
+ Name = N;
+ Val = 0;
+#endif
+}
+
+MipsCallEntry::MipsCallEntry(const GlobalValue *V) {
+#ifndef NDEBUG
+ Val = V;
+#endif
+}
+
+bool MipsCallEntry::isConstant(const MachineFrameInfo *) const {
+ return false;
+}
+
+bool MipsCallEntry::isAliased(const MachineFrameInfo *) const {
+ return false;
+}
+
+bool MipsCallEntry::mayAlias(const MachineFrameInfo *) const {
+ return false;
+}
+
+void MipsCallEntry::printCustom(raw_ostream &O) const {
+ O << "MipsCallEntry: ";
+#ifndef NDEBUG
+ if (Val)
+ O << Val->getName();
+ else
+ O << Name;
+#endif
+}
+
+MipsFunctionInfo::~MipsFunctionInfo() {
+ for (StringMap<const MipsCallEntry *>::iterator
+ I = ExternalCallEntries.begin(), E = ExternalCallEntries.end(); I != E;
+ ++I)
+ delete I->getValue();
+
+ for (ValueMap<const GlobalValue *, const MipsCallEntry *>::iterator
+ I = GlobalCallEntries.begin(), E = GlobalCallEntries.end(); I != E; ++I)
+ delete I->second;
+}
+
bool MipsFunctionInfo::globalBaseRegSet() const {
return GlobalBaseReg;
}
@@ -38,8 +86,8 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass;
else
RC = ST.isABI_N64() ?
- (const TargetRegisterClass*)&Mips::CPU64RegsRegClass :
- (const TargetRegisterClass*)&Mips::CPURegsRegClass;
+ (const TargetRegisterClass*)&Mips::GPR64RegClass :
+ (const TargetRegisterClass*)&Mips::GPR32RegClass;
return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}
@@ -60,7 +108,7 @@ void MipsFunctionInfo::createEhDataRegsFI() {
for (int I = 0; I < 4; ++I) {
const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>();
const TargetRegisterClass *RC = ST.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
EhDataRegFI[I] = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
@@ -72,4 +120,22 @@ bool MipsFunctionInfo::isEhDataRegFI(int FI) const {
|| FI == EhDataRegFI[2] || FI == EhDataRegFI[3]);
}
+MachinePointerInfo MipsFunctionInfo::callPtrInfo(const StringRef &Name) {
+ const MipsCallEntry *&E = ExternalCallEntries[Name];
+
+ if (!E)
+ E = new MipsCallEntry(Name);
+
+ return MachinePointerInfo(E);
+}
+
+MachinePointerInfo MipsFunctionInfo::callPtrInfo(const GlobalValue *Val) {
+ const MipsCallEntry *&E = GlobalCallEntries[Val];
+
+ if (!E)
+ E = new MipsCallEntry(Val);
+
+ return MachinePointerInfo(E);
+}
+
void MipsFunctionInfo::anchor() { }
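
callPtrInfo() above hands out a single MipsCallEntry per callee, keyed by symbol name or GlobalValue, and wraps it in a MachinePointerInfo. A hedged fragment of how call lowering might attach it to the load of the callee's GOT entry; DAG, Chain, DL, Ty, GOTAddr and the G/ES callee nodes are assumed surrounding context, not part of this patch:

  // Illustrative fragment: tag a lazy-binding GOT load with the call entry so
  // later passes know it cannot alias IR memory or the stack.
  MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
  MachinePointerInfo PtrInfo =
      G ? FI->callPtrInfo(G->getGlobal())     // GlobalAddressSDNode callee
        : FI->callPtrInfo(ES->getSymbol());   // ExternalSymbolSDNode callee
  SDValue Callee = DAG.getLoad(Ty, DL, Chain, GOTAddr, PtrInfo,
                               false, false, false, 0);
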
diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h
index b05b348037d9..43bf6827eefb 100644
--- a/lib/Target/Mips/MipsMachineFunction.h
+++ b/lib/Target/Mips/MipsMachineFunction.h
@@ -15,56 +15,48 @@
#define MIPS_MACHINE_FUNCTION_INFO_H
#include "MipsSubtarget.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include <utility>
namespace llvm {
+/// \brief A class derived from PseudoSourceValue that represents a GOT entry
+/// resolved by lazy-binding.
+class MipsCallEntry : public PseudoSourceValue {
+public:
+ explicit MipsCallEntry(const StringRef &N);
+ explicit MipsCallEntry(const GlobalValue *V);
+ virtual bool isConstant(const MachineFrameInfo *) const;
+ virtual bool isAliased(const MachineFrameInfo *) const;
+ virtual bool mayAlias(const MachineFrameInfo *) const;
+
+private:
+ virtual void printCustom(raw_ostream &O) const;
+#ifndef NDEBUG
+ std::string Name;
+ const GlobalValue *Val;
+#endif
+};
+
/// MipsFunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private Mips target-specific information for each MachineFunction.
class MipsFunctionInfo : public MachineFunctionInfo {
- virtual void anchor();
-
- MachineFunction& MF;
- /// SRetReturnReg - Some subtargets require that sret lowering includes
- /// returning the value of the returned struct in a register. This field
- /// holds the virtual register into which the sret argument is passed.
- unsigned SRetReturnReg;
-
- /// GlobalBaseReg - keeps track of the virtual register initialized for
- /// use as the global base register. This is used for PIC in some PIC
- /// relocation models.
- unsigned GlobalBaseReg;
-
- /// Mips16SPAliasReg - keeps track of the virtual register initialized for
- /// use as an alias for SP for use in load/store of halfword/byte from/to
- /// the stack
- unsigned Mips16SPAliasReg;
-
- /// VarArgsFrameIndex - FrameIndex for start of varargs area.
- int VarArgsFrameIndex;
-
- /// True if function has a byval argument.
- bool HasByvalArg;
-
- /// Size of incoming argument area.
- unsigned IncomingArgSize;
-
- /// CallsEhReturn - Whether the function calls llvm.eh.return.
- bool CallsEhReturn;
-
- /// Frame objects for spilling eh data registers.
- int EhDataRegFI[4];
-
public:
MipsFunctionInfo(MachineFunction& MF)
: MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0),
VarArgsFrameIndex(0), CallsEhReturn(false)
{}
+ ~MipsFunctionInfo();
+
unsigned getSRetReturnReg() const { return SRetReturnReg; }
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
@@ -92,6 +84,51 @@ public:
int getEhDataRegFI(unsigned Reg) const { return EhDataRegFI[Reg]; }
bool isEhDataRegFI(int FI) const;
+ /// \brief Create a MachinePointerInfo that has a MipsCallEntry object
+ /// representing a GOT entry for an external function.
+ MachinePointerInfo callPtrInfo(const StringRef &Name);
+
+ /// \brief Create a MachinePointerInfo that has a MipsCallEntry object
+ /// representing a GOT entry for a global function.
+ MachinePointerInfo callPtrInfo(const GlobalValue *Val);
+
+private:
+ virtual void anchor();
+
+ MachineFunction& MF;
+ /// SRetReturnReg - Some subtargets require that sret lowering includes
+ /// returning the value of the returned struct in a register. This field
+ /// holds the virtual register into which the sret argument is passed.
+ unsigned SRetReturnReg;
+
+ /// GlobalBaseReg - keeps track of the virtual register initialized for
+ /// use as the global base register. This is used for PIC in some PIC
+ /// relocation models.
+ unsigned GlobalBaseReg;
+
+ /// Mips16SPAliasReg - keeps track of the virtual register initialized for
+ /// use as an alias for SP for use in load/store of halfword/byte from/to
+ /// the stack
+ unsigned Mips16SPAliasReg;
+
+ /// VarArgsFrameIndex - FrameIndex for start of varargs area.
+ int VarArgsFrameIndex;
+
+ /// True if function has a byval argument.
+ bool HasByvalArg;
+
+ /// Size of incoming argument area.
+ unsigned IncomingArgSize;
+
+ /// CallsEhReturn - Whether the function calls llvm.eh.return.
+ bool CallsEhReturn;
+
+ /// Frame objects for spilling eh data registers.
+ int EhDataRegFI[4];
+
+ /// MipsCallEntry maps.
+ StringMap<const MipsCallEntry *> ExternalCallEntries;
+ ValueMap<const GlobalValue *, const MipsCallEntry *> GlobalCallEntries;
};
} // end of namespace llvm
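
Since isConstant(), isAliased() and mayAlias() all answer false, a load whose memory operand carries a MipsCallEntry is treated as disjoint from frame objects and from ordinary IR memory. A small sketch of how such an operand could be inspected (in this release PseudoSourceValue still derives from Value); MMO and MFI are assumed to come from the surrounding pass:

  // Illustrative fragment only.
  if (const PseudoSourceValue *PSV =
          dyn_cast_or_null<PseudoSourceValue>(MMO->getValue())) {
    bool ConstLoad  = PSV->isConstant(MFI);  // false: the dynamic linker rewrites the slot
    bool FrameAlias = PSV->isAliased(MFI);   // false: not backed by a frame index
    bool IRAlias    = PSV->mayAlias(MFI);    // false: cannot alias LLVM IR memory
    (void)ConstLoad; (void)FrameAlias; (void)IRAlias;
  }
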
diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp
index 1919077eeb71..fe60841212e0 100644
--- a/lib/Target/Mips/MipsOs16.cpp
+++ b/lib/Target/Mips/MipsOs16.cpp
@@ -14,9 +14,17 @@
#define DEBUG_TYPE "mips-os16"
#include "MipsOs16.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+
+static cl::opt<std::string> Mips32FunctionMask(
+ "mips32-function-mask",
+ cl::init(""),
+ cl::desc("Force function to be mips32"),
+ cl::Hidden);
+
namespace {
// Figure out if we need floating point based on the function signature.
@@ -85,18 +93,43 @@ namespace llvm {
bool MipsOs16::runOnModule(Module &M) {
- DEBUG(errs() << "Run on Module MipsOs16\n");
+ bool usingMask = Mips32FunctionMask.length() > 0;
+ bool doneUsingMask = false; // set once '.' is seen; stop applying the mask to later functions
+ DEBUG(dbgs() << "Run on Module MipsOs16 \n" << Mips32FunctionMask << "\n");
+ if (usingMask)
+ DEBUG(dbgs() << "using mask \n" << Mips32FunctionMask << "\n");
+ unsigned int functionIndex = 0;
bool modified = false;
for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
if (F->isDeclaration()) continue;
DEBUG(dbgs() << "Working on " << F->getName() << "\n");
- if (needsFP(*F)) {
- DEBUG(dbgs() << " need to compile as nomips16 \n");
- F->addFnAttr("nomips16");
+ if (usingMask) {
+ if (!doneUsingMask) {
+ if (functionIndex == Mips32FunctionMask.length())
+ functionIndex = 0;
+ switch (Mips32FunctionMask[functionIndex]) {
+ case '1':
+ DEBUG(dbgs() << "mask forced mips32: " << F->getName() << "\n");
+ F->addFnAttr("nomips16");
+ break;
+ case '.':
+ doneUsingMask = true;
+ break;
+ default:
+ break;
+ }
+ functionIndex++;
+ }
}
else {
- F->addFnAttr("mips16");
- DEBUG(dbgs() << " no need to compile as nomips16 \n");
+ if (needsFP(*F)) {
+ DEBUG(dbgs() << "os16 forced mips32: " << F->getName() << "\n");
+ F->addFnAttr("nomips16");
+ }
+ else {
+ DEBUG(dbgs() << "os16 forced mips16: " << F->getName() << "\n");
+ F->addFnAttr("mips16");
+ }
}
}
return modified;
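
For reference, the mask added above is consumed one character per defined function and wraps around when exhausted: '1' forces the function to MIPS32 (adds "nomips16"), any other digit leaves it untouched, and '.' stops masking for the rest of the module, so a hypothetical -mips32-function-mask=10. affects at most the first two functions. A standalone sketch of that interpretation, illustrative only:

  #include <string>
  #include <vector>

  // Returns, for each of NumFuncs functions, whether the mask forces MIPS32.
  std::vector<bool> forcedMips32(const std::string &Mask, unsigned NumFuncs) {
    std::vector<bool> Forced(NumFuncs, false);
    bool Done = Mask.empty();
    for (unsigned I = 0, Idx = 0; I != NumFuncs && !Done; ++I, ++Idx) {
      if (Idx == Mask.length())
        Idx = 0;                 // the mask repeats when it runs out
      if (Mask[Idx] == '1')
        Forced[I] = true;        // this function gets the "nomips16" attribute
      else if (Mask[Idx] == '.')
        Done = true;             // stop masking the remaining functions
    }
    return Forced;
  }
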
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index dead07bacd5e..3105b0208451 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -47,6 +47,11 @@ MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST)
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
+const TargetRegisterClass *
+MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ return Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+}
unsigned
MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
@@ -54,9 +59,9 @@ MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
switch (RC->getID()) {
default:
return 0;
- case Mips::CPURegsRegClassID:
- case Mips::CPU64RegsRegClassID:
- case Mips::DSPRegsRegClassID: {
+ case Mips::GPR32RegClassID:
+ case Mips::GPR64RegClassID:
+ case Mips::DSPRRegClassID: {
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
return 28 - TFI->hasFP(MF);
}
@@ -78,48 +83,60 @@ const uint16_t* MipsRegisterInfo::
getCalleeSavedRegs(const MachineFunction *MF) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_SaveList;
- else if (!Subtarget.hasMips64())
- return CSR_O32_SaveList;
- else if (Subtarget.isABI_N32())
+
+ if (Subtarget.isABI_N64())
+ return CSR_N64_SaveList;
+
+ if (Subtarget.isABI_N32())
return CSR_N32_SaveList;
- assert(Subtarget.isABI_N64());
- return CSR_N64_SaveList;
+ if (Subtarget.isFP64bit())
+ return CSR_O32_FP64_SaveList;
+
+ return CSR_O32_SaveList;
}
const uint32_t*
MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_RegMask;
- else if (!Subtarget.hasMips64())
- return CSR_O32_RegMask;
- else if (Subtarget.isABI_N32())
+
+ if (Subtarget.isABI_N64())
+ return CSR_N64_RegMask;
+
+ if (Subtarget.isABI_N32())
return CSR_N32_RegMask;
- assert(Subtarget.isABI_N64());
- return CSR_N64_RegMask;
+ if (Subtarget.isFP64bit())
+ return CSR_O32_FP64_RegMask;
+
+ return CSR_O32_RegMask;
+}
+
+const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() {
+ return CSR_Mips16RetHelper_RegMask;
}
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
- static const uint16_t ReservedCPURegs[] = {
+ static const uint16_t ReservedGPR32[] = {
Mips::ZERO, Mips::K0, Mips::K1, Mips::SP
};
- static const uint16_t ReservedCPU64Regs[] = {
+ static const uint16_t ReservedGPR64[] = {
Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
BitVector Reserved(getNumRegs());
typedef TargetRegisterClass::const_iterator RegIter;
- for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I)
- Reserved.set(ReservedCPURegs[I]);
+ for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I)
+ Reserved.set(ReservedGPR32[I]);
- for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I)
- Reserved.set(ReservedCPU64Regs[I]);
+ for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
+ Reserved.set(ReservedGPR64[I]);
- if (Subtarget.hasMips64()) {
+ if (Subtarget.isFP64bit()) {
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegClass.begin(),
EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
@@ -142,7 +159,6 @@ getReservedRegs(const MachineFunction &MF) const {
// Reserve hardware registers.
Reserved.set(Mips::HWR29);
- Reserved.set(Mips::HWR29_64);
// Reserve DSP control register.
Reserved.set(Mips::DSPPos);
@@ -151,10 +167,22 @@ getReservedRegs(const MachineFunction &MF) const {
Reserved.set(Mips::DSPEFI);
Reserved.set(Mips::DSPOutFlag);
+ // Reserve MSA control registers.
+ Reserved.set(Mips::MSAIR);
+ Reserved.set(Mips::MSACSR);
+ Reserved.set(Mips::MSAAccess);
+ Reserved.set(Mips::MSASave);
+ Reserved.set(Mips::MSAModify);
+ Reserved.set(Mips::MSARequest);
+ Reserved.set(Mips::MSAMap);
+ Reserved.set(Mips::MSAUnmap);
+
// Reserve RA if in mips16 mode.
if (Subtarget.inMips16Mode()) {
Reserved.set(Mips::RA);
Reserved.set(Mips::RA_64);
+ Reserved.set(Mips::T0);
+ Reserved.set(Mips::T1);
}
// Reserve GP if small section is used.
@@ -212,12 +240,3 @@ getFrameRegister(const MachineFunction &MF) const {
}
-unsigned MipsRegisterInfo::
-getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
-}
-
-unsigned MipsRegisterInfo::
-getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
-}
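
The getPointerRegClass() override added in this file lets generic code ask for a pointer-width register class instead of hard-coding one. A hedged fragment of how a pass might use it; MF and MRI are assumed from the caller:

  // Illustrative fragment: pick a scratch register wide enough for a pointer.
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  const TargetRegisterClass *PtrRC = TRI->getPointerRegClass(MF);
  unsigned Scratch = MRI.createVirtualRegister(PtrRC);  // GPR64 on N64, else GPR32
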
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 5ed51241391f..0450c6fbe47e 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -42,10 +42,14 @@ public:
void adjustMipsStackFrame(MachineFunction &MF) const;
/// Code Generation virtual methods...
+ const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const;
+
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ static const uint32_t *getMips16RetHelperMask();
BitVector getReservedRegs(const MachineFunction &MF) const;
@@ -64,10 +68,6 @@ public:
/// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
- /// Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
/// \brief Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td
index 229f1677c044..3173d0927af1 100644
--- a/lib/Target/Mips/MipsRegisterInfo.td
+++ b/lib/Target/Mips/MipsRegisterInfo.td
@@ -11,16 +11,15 @@
// Declarations that describe the MIPS register file
//===----------------------------------------------------------------------===//
let Namespace = "Mips" in {
-def sub_fpeven : SubRegIndex;
-def sub_fpodd : SubRegIndex;
-def sub_32 : SubRegIndex;
-def sub_lo : SubRegIndex;
-def sub_hi : SubRegIndex;
-def sub_dsp16_19 : SubRegIndex;
-def sub_dsp20 : SubRegIndex;
-def sub_dsp21 : SubRegIndex;
-def sub_dsp22 : SubRegIndex;
-def sub_dsp23 : SubRegIndex;
+def sub_32 : SubRegIndex<32>;
+def sub_64 : SubRegIndex<64>;
+def sub_lo : SubRegIndex<32>;
+def sub_hi : SubRegIndex<32, 32>;
+def sub_dsp16_19 : SubRegIndex<4, 16>;
+def sub_dsp20 : SubRegIndex<1, 20>;
+def sub_dsp21 : SubRegIndex<1, 21>;
+def sub_dsp22 : SubRegIndex<1, 22>;
+def sub_dsp23 : SubRegIndex<1, 23>;
}
class Unallocatable {
@@ -54,17 +53,24 @@ class FPR<bits<16> Enc, string n> : MipsReg<Enc, n>;
// Mips 64-bit (aliased) FPU Registers
class AFPR<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
- let SubRegIndices = [sub_fpeven, sub_fpodd];
+ let SubRegIndices = [sub_lo, sub_hi];
let CoveredBySubRegs = 1;
}
class AFPR64<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
- let SubRegIndices = [sub_32];
+ let SubRegIndices = [sub_lo, sub_hi];
+ let CoveredBySubRegs = 1;
+}
+
+// Mips 128-bit (aliased) MSA Registers
+class AFPR128<bits<16> Enc, string n, list<Register> subregs>
+ : MipsRegWithSubRegs<Enc, n, subregs> {
+ let SubRegIndices = [sub_64];
}
// Accumulator Registers
-class ACC<bits<16> Enc, string n, list<Register> subregs>
+class ACCReg<bits<16> Enc, string n, list<Register> subregs>
: MipsRegWithSubRegs<Enc, n, subregs> {
let SubRegIndices = [sub_lo, sub_hi];
let CoveredBySubRegs = 1;
@@ -147,127 +153,70 @@ let Namespace = "Mips" in {
def RA_64 : Mips64GPRReg< 31, "ra", [RA]>, DwarfRegNum<[31]>;
  /// Mips single precision FPU Registers
- def F0 : FPR< 0, "f0">, DwarfRegNum<[32]>;
- def F1 : FPR< 1, "f1">, DwarfRegNum<[33]>;
- def F2 : FPR< 2, "f2">, DwarfRegNum<[34]>;
- def F3 : FPR< 3, "f3">, DwarfRegNum<[35]>;
- def F4 : FPR< 4, "f4">, DwarfRegNum<[36]>;
- def F5 : FPR< 5, "f5">, DwarfRegNum<[37]>;
- def F6 : FPR< 6, "f6">, DwarfRegNum<[38]>;
- def F7 : FPR< 7, "f7">, DwarfRegNum<[39]>;
- def F8 : FPR< 8, "f8">, DwarfRegNum<[40]>;
- def F9 : FPR< 9, "f9">, DwarfRegNum<[41]>;
- def F10 : FPR<10, "f10">, DwarfRegNum<[42]>;
- def F11 : FPR<11, "f11">, DwarfRegNum<[43]>;
- def F12 : FPR<12, "f12">, DwarfRegNum<[44]>;
- def F13 : FPR<13, "f13">, DwarfRegNum<[45]>;
- def F14 : FPR<14, "f14">, DwarfRegNum<[46]>;
- def F15 : FPR<15, "f15">, DwarfRegNum<[47]>;
- def F16 : FPR<16, "f16">, DwarfRegNum<[48]>;
- def F17 : FPR<17, "f17">, DwarfRegNum<[49]>;
- def F18 : FPR<18, "f18">, DwarfRegNum<[50]>;
- def F19 : FPR<19, "f19">, DwarfRegNum<[51]>;
- def F20 : FPR<20, "f20">, DwarfRegNum<[52]>;
- def F21 : FPR<21, "f21">, DwarfRegNum<[53]>;
- def F22 : FPR<22, "f22">, DwarfRegNum<[54]>;
- def F23 : FPR<23, "f23">, DwarfRegNum<[55]>;
- def F24 : FPR<24, "f24">, DwarfRegNum<[56]>;
- def F25 : FPR<25, "f25">, DwarfRegNum<[57]>;
- def F26 : FPR<26, "f26">, DwarfRegNum<[58]>;
- def F27 : FPR<27, "f27">, DwarfRegNum<[59]>;
- def F28 : FPR<28, "f28">, DwarfRegNum<[60]>;
- def F29 : FPR<29, "f29">, DwarfRegNum<[61]>;
- def F30 : FPR<30, "f30">, DwarfRegNum<[62]>;
- def F31 : FPR<31, "f31">, DwarfRegNum<[63]>;
+ foreach I = 0-31 in
+ def F#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>;
+
+ // Higher half of 64-bit FP registers.
+ foreach I = 0-31 in
+ def F_HI#I : FPR<I, "f"#I>, DwarfRegNum<[!add(I, 32)]>;
  /// Mips double precision FPU Registers (aliased
  /// with the single precision registers to hold 64-bit values)
- def D0 : AFPR< 0, "f0", [F0, F1]>;
- def D1 : AFPR< 2, "f2", [F2, F3]>;
- def D2 : AFPR< 4, "f4", [F4, F5]>;
- def D3 : AFPR< 6, "f6", [F6, F7]>;
- def D4 : AFPR< 8, "f8", [F8, F9]>;
- def D5 : AFPR<10, "f10", [F10, F11]>;
- def D6 : AFPR<12, "f12", [F12, F13]>;
- def D7 : AFPR<14, "f14", [F14, F15]>;
- def D8 : AFPR<16, "f16", [F16, F17]>;
- def D9 : AFPR<18, "f18", [F18, F19]>;
- def D10 : AFPR<20, "f20", [F20, F21]>;
- def D11 : AFPR<22, "f22", [F22, F23]>;
- def D12 : AFPR<24, "f24", [F24, F25]>;
- def D13 : AFPR<26, "f26", [F26, F27]>;
- def D14 : AFPR<28, "f28", [F28, F29]>;
- def D15 : AFPR<30, "f30", [F30, F31]>;
+ foreach I = 0-15 in
+ def D#I : AFPR<!shl(I, 1), "f"#!shl(I, 1),
+ [!cast<FPR>("F"#!shl(I, 1)),
+ !cast<FPR>("F"#!add(!shl(I, 1), 1))]>;
  /// Mips double precision FPU Registers in MFP64 mode.
- def D0_64 : AFPR64<0, "f0", [F0]>, DwarfRegNum<[32]>;
- def D1_64 : AFPR64<1, "f1", [F1]>, DwarfRegNum<[33]>;
- def D2_64 : AFPR64<2, "f2", [F2]>, DwarfRegNum<[34]>;
- def D3_64 : AFPR64<3, "f3", [F3]>, DwarfRegNum<[35]>;
- def D4_64 : AFPR64<4, "f4", [F4]>, DwarfRegNum<[36]>;
- def D5_64 : AFPR64<5, "f5", [F5]>, DwarfRegNum<[37]>;
- def D6_64 : AFPR64<6, "f6", [F6]>, DwarfRegNum<[38]>;
- def D7_64 : AFPR64<7, "f7", [F7]>, DwarfRegNum<[39]>;
- def D8_64 : AFPR64<8, "f8", [F8]>, DwarfRegNum<[40]>;
- def D9_64 : AFPR64<9, "f9", [F9]>, DwarfRegNum<[41]>;
- def D10_64 : AFPR64<10, "f10", [F10]>, DwarfRegNum<[42]>;
- def D11_64 : AFPR64<11, "f11", [F11]>, DwarfRegNum<[43]>;
- def D12_64 : AFPR64<12, "f12", [F12]>, DwarfRegNum<[44]>;
- def D13_64 : AFPR64<13, "f13", [F13]>, DwarfRegNum<[45]>;
- def D14_64 : AFPR64<14, "f14", [F14]>, DwarfRegNum<[46]>;
- def D15_64 : AFPR64<15, "f15", [F15]>, DwarfRegNum<[47]>;
- def D16_64 : AFPR64<16, "f16", [F16]>, DwarfRegNum<[48]>;
- def D17_64 : AFPR64<17, "f17", [F17]>, DwarfRegNum<[49]>;
- def D18_64 : AFPR64<18, "f18", [F18]>, DwarfRegNum<[50]>;
- def D19_64 : AFPR64<19, "f19", [F19]>, DwarfRegNum<[51]>;
- def D20_64 : AFPR64<20, "f20", [F20]>, DwarfRegNum<[52]>;
- def D21_64 : AFPR64<21, "f21", [F21]>, DwarfRegNum<[53]>;
- def D22_64 : AFPR64<22, "f22", [F22]>, DwarfRegNum<[54]>;
- def D23_64 : AFPR64<23, "f23", [F23]>, DwarfRegNum<[55]>;
- def D24_64 : AFPR64<24, "f24", [F24]>, DwarfRegNum<[56]>;
- def D25_64 : AFPR64<25, "f25", [F25]>, DwarfRegNum<[57]>;
- def D26_64 : AFPR64<26, "f26", [F26]>, DwarfRegNum<[58]>;
- def D27_64 : AFPR64<27, "f27", [F27]>, DwarfRegNum<[59]>;
- def D28_64 : AFPR64<28, "f28", [F28]>, DwarfRegNum<[60]>;
- def D29_64 : AFPR64<29, "f29", [F29]>, DwarfRegNum<[61]>;
- def D30_64 : AFPR64<30, "f30", [F30]>, DwarfRegNum<[62]>;
- def D31_64 : AFPR64<31, "f31", [F31]>, DwarfRegNum<[63]>;
+ foreach I = 0-31 in
+ def D#I#_64 : AFPR64<I, "f"#I, [!cast<FPR>("F"#I), !cast<FPR>("F_HI"#I)]>,
+ DwarfRegNum<[!add(I, 32)]>;
+
+ /// Mips MSA registers
+ /// MSA and FPU cannot both be present unless the FPU has 64-bit registers
+ foreach I = 0-31 in
+ def W#I : AFPR128<I, "w"#I, [!cast<AFPR64>("D"#I#"_64")]>,
+ DwarfRegNum<[!add(I, 32)]>;
// Hi/Lo registers
- def HI : Register<"ac0">, DwarfRegNum<[64]>;
- def HI1 : Register<"ac1">, DwarfRegNum<[176]>;
- def HI2 : Register<"ac2">, DwarfRegNum<[178]>;
- def HI3 : Register<"ac3">, DwarfRegNum<[180]>;
- def LO : Register<"ac0">, DwarfRegNum<[65]>;
- def LO1 : Register<"ac1">, DwarfRegNum<[177]>;
- def LO2 : Register<"ac2">, DwarfRegNum<[179]>;
- def LO3 : Register<"ac3">, DwarfRegNum<[181]>;
+ def HI0 : MipsReg<0, "ac0">, DwarfRegNum<[64]>;
+ def HI1 : MipsReg<1, "ac1">, DwarfRegNum<[176]>;
+ def HI2 : MipsReg<2, "ac2">, DwarfRegNum<[178]>;
+ def HI3 : MipsReg<3, "ac3">, DwarfRegNum<[180]>;
+ def LO0 : MipsReg<0, "ac0">, DwarfRegNum<[65]>;
+ def LO1 : MipsReg<1, "ac1">, DwarfRegNum<[177]>;
+ def LO2 : MipsReg<2, "ac2">, DwarfRegNum<[179]>;
+ def LO3 : MipsReg<3, "ac3">, DwarfRegNum<[181]>;
let SubRegIndices = [sub_32] in {
- def HI64 : RegisterWithSubRegs<"hi", [HI]>;
- def LO64 : RegisterWithSubRegs<"lo", [LO]>;
+ def HI0_64 : RegisterWithSubRegs<"hi", [HI0]>;
+ def LO0_64 : RegisterWithSubRegs<"lo", [LO0]>;
}
- // Status flags register
- def FCR31 : Register<"31">;
+ // FP control registers.
+ foreach I = 0-31 in
+ def FCR#I : MipsReg<#I, ""#I>;
+
+ // FP condition code registers.
+ foreach I = 0-7 in
+ def FCC#I : MipsReg<#I, "fcc"#I>;
- // fcc0 register
- def FCC0 : MipsReg<0, "fcc0">;
+ // COP2 registers.
+ foreach I = 0-31 in
+ def COP2#I : MipsReg<#I, ""#I>;
// PC register
def PC : Register<"pc">;
// Hardware register $29
def HWR29 : MipsReg<29, "29">;
- def HWR29_64 : MipsReg<29, "29">;
// Accum registers
- def AC0 : ACC<0, "ac0", [LO, HI]>;
- def AC1 : ACC<1, "ac1", [LO1, HI1]>;
- def AC2 : ACC<2, "ac2", [LO2, HI2]>;
- def AC3 : ACC<3, "ac3", [LO3, HI3]>;
+ foreach I = 0-3 in
+ def AC#I : ACCReg<#I, "ac"#I,
+ [!cast<Register>("LO"#I), !cast<Register>("HI"#I)]>;
- def AC0_64 : ACC<0, "ac0", [LO64, HI64]>;
+ def AC0_64 : ACCReg<0, "ac0", [LO0_64, HI0_64]>;
// DSP-ASE control register fields.
def DSPPos : Register<"">;
@@ -286,13 +235,23 @@ let Namespace = "Mips" in {
def DSPOutFlag : RegisterWithSubRegs<"", [DSPOutFlag16_19, DSPOutFlag20,
DSPOutFlag21, DSPOutFlag22,
DSPOutFlag23]>;
+
+ // MSA-ASE control registers.
+ def MSAIR : MipsReg<0, "0">;
+ def MSACSR : MipsReg<1, "1">;
+ def MSAAccess : MipsReg<2, "2">;
+ def MSASave : MipsReg<3, "3">;
+ def MSAModify : MipsReg<4, "4">;
+ def MSARequest : MipsReg<5, "5">;
+ def MSAMap : MipsReg<6, "6">;
+ def MSAUnmap : MipsReg<7, "7">;
}
//===----------------------------------------------------------------------===//
// Register Classes
//===----------------------------------------------------------------------===//
-class CPURegsClass<list<ValueType> regTypes> :
+class GPR32Class<list<ValueType> regTypes> :
RegisterClass<"Mips", regTypes, 32, (add
// Reserved
ZERO, AT,
@@ -307,10 +266,10 @@ class CPURegsClass<list<ValueType> regTypes> :
// Reserved
K0, K1, GP, SP, FP, RA)>;
-def CPURegs : CPURegsClass<[i32]>;
-def DSPRegs : CPURegsClass<[v4i8, v2i16]>;
+def GPR32 : GPR32Class<[i32]>;
+def DSPR : GPR32Class<[v4i8, v2i16]>;
-def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add
+def GPR64 : RegisterClass<"Mips", [i64], 64, (add
// Reserved
ZERO_64, AT_64,
// Return Values and Arguments
@@ -330,6 +289,13 @@ def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add
// Callee save
S0, S1)>;
+def CPU16RegsPlusSP : RegisterClass<"Mips", [i32], 32, (add
+ // Return Values and Arguments
+ V0, V1, A0, A1, A2, A3,
+ // Callee save
+ S0, S1,
+ SP)>;
+
def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>, Unallocatable;
def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable;
@@ -343,6 +309,9 @@ def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>, Unallocatable;
// * FGR32 - 32 32-bit registers (single float only mode)
def FGR32 : RegisterClass<"Mips", [f32], 32, (sequence "F%u", 0, 31)>;
+def FGRH32 : RegisterClass<"Mips", [f32], 32, (sequence "F_HI%u", 0, 31)>,
+ Unallocatable;
+
def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
// Return Values and Arguments
D0, D1,
@@ -357,78 +326,224 @@ def AFGR64 : RegisterClass<"Mips", [f64], 64, (add
def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)>;
-// Condition Register for floating point operations
-def CCR : RegisterClass<"Mips", [i32], 32, (add FCR31,FCC0)>, Unallocatable;
+// FP control registers.
+def CCR : RegisterClass<"Mips", [i32], 32, (sequence "FCR%u", 0, 31)>,
+ Unallocatable;
+
+// FP condition code registers.
+def FCC : RegisterClass<"Mips", [i32], 32, (sequence "FCC%u", 0, 7)>,
+ Unallocatable;
+
+def MSA128B: RegisterClass<"Mips", [v16i8], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128H: RegisterClass<"Mips", [v8i16, v8f16], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128W: RegisterClass<"Mips", [v4i32, v4f32], 128,
+ (sequence "W%u", 0, 31)>;
+def MSA128D: RegisterClass<"Mips", [v2i64, v2f64], 128,
+ (sequence "W%u", 0, 31)>;
+
+def MSACtrl: RegisterClass<"Mips", [i32], 32, (add
+ MSAIR, MSACSR, MSAAccess, MSASave, MSAModify, MSARequest, MSAMap, MSAUnmap)>;
// Hi/Lo Registers
-def LORegs : RegisterClass<"Mips", [i32], 32, (add LO)>;
-def HIRegs : RegisterClass<"Mips", [i32], 32, (add HI)>;
-def LORegsDSP : RegisterClass<"Mips", [i32], 32, (add LO, LO1, LO2, LO3)>;
-def HIRegsDSP : RegisterClass<"Mips", [i32], 32, (add HI, HI1, HI2, HI3)>;
-def LORegs64 : RegisterClass<"Mips", [i64], 64, (add LO64)>;
-def HIRegs64 : RegisterClass<"Mips", [i64], 64, (add HI64)>;
+def LO32 : RegisterClass<"Mips", [i32], 32, (add LO0)>;
+def HI32 : RegisterClass<"Mips", [i32], 32, (add HI0)>;
+def LO32DSP : RegisterClass<"Mips", [i32], 32, (sequence "LO%u", 0, 3)>;
+def HI32DSP : RegisterClass<"Mips", [i32], 32, (sequence "HI%u", 0, 3)>;
+def LO64 : RegisterClass<"Mips", [i64], 64, (add LO0_64)>;
+def HI64 : RegisterClass<"Mips", [i64], 64, (add HI0_64)>;
// Hardware registers
def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>, Unallocatable;
-def HWRegs64 : RegisterClass<"Mips", [i64], 64, (add HWR29_64)>, Unallocatable;
// Accumulator Registers
-def ACRegs : RegisterClass<"Mips", [untyped], 64, (add AC0)> {
+def ACC64 : RegisterClass<"Mips", [untyped], 64, (add AC0)> {
let Size = 64;
}
-def ACRegs128 : RegisterClass<"Mips", [untyped], 128, (add AC0_64)> {
+def ACC128 : RegisterClass<"Mips", [untyped], 128, (add AC0_64)> {
let Size = 128;
}
-def ACRegsDSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> {
+def ACC64DSP : RegisterClass<"Mips", [untyped], 64, (sequence "AC%u", 0, 3)> {
let Size = 64;
}
def DSPCC : RegisterClass<"Mips", [v4i8, v2i16], 32, (add DSPCCond)>;
+// Coprocessor 2 registers.
+def COP2 : RegisterClass<"Mips", [i32], 32, (sequence "COP2%u", 0, 31)>,
+ Unallocatable;
+
// Register Operands.
-def CPURegsAsmOperand : AsmOperandClass {
- let Name = "CPURegsAsm";
- let ParserMethod = "parseCPURegs";
+
+class MipsAsmRegOperand : AsmOperandClass {
+ let RenderMethod = "addRegAsmOperands";
+}
+def GPR32AsmOperand : MipsAsmRegOperand {
+ let Name = "GPR32Asm";
+ let ParserMethod = "parseGPR32";
+}
+
+def GPR64AsmOperand : MipsAsmRegOperand {
+ let Name = "GPR64Asm";
+ let ParserMethod = "parseGPR64";
+}
+
+def ACC64DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "ACC64DSPAsm";
+ let ParserMethod = "parseACC64DSP";
+}
+
+def LO32DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "LO32DSPAsm";
+ let ParserMethod = "parseLO32DSP";
}
-def CPU64RegsAsmOperand : AsmOperandClass {
- let Name = "CPU64RegsAsm";
- let ParserMethod = "parseCPU64Regs";
+def HI32DSPAsmOperand : MipsAsmRegOperand {
+ let Name = "HI32DSPAsm";
+ let ParserMethod = "parseHI32DSP";
}
-def CCRAsmOperand : AsmOperandClass {
+def CCRAsmOperand : MipsAsmRegOperand {
let Name = "CCRAsm";
let ParserMethod = "parseCCRRegs";
}
-def CPURegsOpnd : RegisterOperand<CPURegs, "printCPURegs"> {
- let ParserMatchClass = CPURegsAsmOperand;
+def AFGR64AsmOperand : MipsAsmRegOperand {
+ let Name = "AFGR64Asm";
+ let ParserMethod = "parseAFGR64Regs";
+}
+
+def FGR64AsmOperand : MipsAsmRegOperand {
+ let Name = "FGR64Asm";
+ let ParserMethod = "parseFGR64Regs";
}
-def CPU64RegsOpnd : RegisterOperand<CPU64Regs, "printCPURegs"> {
- let ParserMatchClass = CPU64RegsAsmOperand;
+def FGR32AsmOperand : MipsAsmRegOperand {
+ let Name = "FGR32Asm";
+ let ParserMethod = "parseFGR32Regs";
}
-def CCROpnd : RegisterOperand<CCR, "printCPURegs"> {
+def FGRH32AsmOperand : MipsAsmRegOperand {
+ let Name = "FGRH32Asm";
+ let ParserMethod = "parseFGRH32Regs";
+}
+
+def FCCRegsAsmOperand : MipsAsmRegOperand {
+ let Name = "FCCRegsAsm";
+ let ParserMethod = "parseFCCRegs";
+}
+
+def MSA128BAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128BAsm";
+ let ParserMethod = "parseMSA128BRegs";
+}
+
+def MSA128HAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128HAsm";
+ let ParserMethod = "parseMSA128HRegs";
+}
+
+def MSA128WAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128WAsm";
+ let ParserMethod = "parseMSA128WRegs";
+}
+
+def MSA128DAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128DAsm";
+ let ParserMethod = "parseMSA128DRegs";
+}
+
+def MSA128CRAsmOperand : MipsAsmRegOperand {
+ let Name = "MSA128CRAsm";
+ let ParserMethod = "parseMSA128CtrlRegs";
+}
+
+def GPR32Opnd : RegisterOperand<GPR32> {
+ let ParserMatchClass = GPR32AsmOperand;
+}
+
+def GPR64Opnd : RegisterOperand<GPR64> {
+ let ParserMatchClass = GPR64AsmOperand;
+}
+
+def DSPROpnd : RegisterOperand<DSPR> {
+ let ParserMatchClass = GPR32AsmOperand;
+}
+
+def CCROpnd : RegisterOperand<CCR> {
let ParserMatchClass = CCRAsmOperand;
}
-def HWRegsAsmOperand : AsmOperandClass {
+def HWRegsAsmOperand : MipsAsmRegOperand {
let Name = "HWRegsAsm";
let ParserMethod = "parseHWRegs";
}
-def HW64RegsAsmOperand : AsmOperandClass {
- let Name = "HW64RegsAsm";
- let ParserMethod = "parseHW64Regs";
+def COP2AsmOperand : MipsAsmRegOperand {
+ let Name = "COP2Asm";
+ let ParserMethod = "parseCOP2";
}
-def HWRegsOpnd : RegisterOperand<HWRegs, "printCPURegs"> {
+def HWRegsOpnd : RegisterOperand<HWRegs> {
let ParserMatchClass = HWRegsAsmOperand;
}
-def HW64RegsOpnd : RegisterOperand<HWRegs64, "printCPURegs"> {
- let ParserMatchClass = HW64RegsAsmOperand;
+def AFGR64Opnd : RegisterOperand<AFGR64> {
+ let ParserMatchClass = AFGR64AsmOperand;
+}
+
+def FGR64Opnd : RegisterOperand<FGR64> {
+ let ParserMatchClass = FGR64AsmOperand;
+}
+
+def FGR32Opnd : RegisterOperand<FGR32> {
+ let ParserMatchClass = FGR32AsmOperand;
+}
+
+def FGRH32Opnd : RegisterOperand<FGRH32> {
+ let ParserMatchClass = FGRH32AsmOperand;
+}
+
+def FCCRegsOpnd : RegisterOperand<FCC> {
+ let ParserMatchClass = FCCRegsAsmOperand;
}
+
+def LO32DSPOpnd : RegisterOperand<LO32DSP> {
+ let ParserMatchClass = LO32DSPAsmOperand;
+}
+
+def HI32DSPOpnd : RegisterOperand<HI32DSP> {
+ let ParserMatchClass = HI32DSPAsmOperand;
+}
+
+def ACC64DSPOpnd : RegisterOperand<ACC64DSP> {
+ let ParserMatchClass = ACC64DSPAsmOperand;
+}
+
+def COP2Opnd : RegisterOperand<COP2> {
+ let ParserMatchClass = COP2AsmOperand;
+}
+
+def MSA128BOpnd : RegisterOperand<MSA128B> {
+ let ParserMatchClass = MSA128BAsmOperand;
+}
+
+def MSA128HOpnd : RegisterOperand<MSA128H> {
+ let ParserMatchClass = MSA128HAsmOperand;
+}
+
+def MSA128WOpnd : RegisterOperand<MSA128W> {
+ let ParserMatchClass = MSA128WAsmOperand;
+}
+
+def MSA128DOpnd : RegisterOperand<MSA128D> {
+ let ParserMatchClass = MSA128DAsmOperand;
+}
+
+def MSA128CROpnd : RegisterOperand<MSACtrl> {
+ let ParserMatchClass = MSA128CRAsmOperand;
+}
+
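
The foreach-generated definitions above nest the register files: each MSA register W<n> covers the 64-bit D<n>_64 through sub_64, and D<n>_64 is in turn built from F<n> (sub_lo) and the new F_HI<n> (sub_hi). A short sketch of how the chain can be queried from C++; TRI is assumed to be the function's register info:

  // Illustrative fragment: walk W0 -> D0_64 -> F0 / F_HI0.
  unsigned D0Reg = TRI->getSubReg(Mips::W0, Mips::sub_64);  // Mips::D0_64
  unsigned LoHalf = TRI->getSubReg(D0Reg, Mips::sub_lo);    // Mips::F0
  unsigned HiHalf = TRI->getSubReg(D0Reg, Mips::sub_hi);    // Mips::F_HI0
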
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index b295e911bdc9..33ed4b3e3a67 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -32,6 +32,21 @@ using namespace llvm;
namespace {
typedef MachineBasicBlock::iterator Iter;
+static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
+ if (Mips::ACC64RegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::PseudoMFHI,
+ (unsigned)Mips::PseudoMFLO);
+
+ if (Mips::ACC64DSPRegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);
+
+ if (Mips::ACC128RegClass.contains(Src))
+ return std::make_pair((unsigned)Mips::PseudoMFHI64,
+ (unsigned)Mips::PseudoMFLO64);
+
+ return std::make_pair(0, 0);
+}
+
/// Helper class to expand pseudos.
class ExpandPseudo {
public:
@@ -43,22 +58,19 @@ private:
void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
- void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
+ void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
+ unsigned MFLoOpc, unsigned RegSize);
bool expandCopy(MachineBasicBlock &MBB, Iter I);
- bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst,
- unsigned Src, unsigned RegSize);
+ bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
+ unsigned MFLoOpc);
MachineFunction &MF;
- const MipsSEInstrInfo &TII;
- const MipsRegisterInfo &RegInfo;
MachineRegisterInfo &MRI;
};
}
ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
- : MF(MF_),
- TII(*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo())),
- RegInfo(TII.getRegisterInfo()), MRI(MF.getRegInfo()) {}
+ : MF(MF_), MRI(MF.getRegInfo()) {}
bool ExpandPseudo::expand() {
bool Expanded = false;
@@ -74,32 +86,26 @@ bool ExpandPseudo::expand() {
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
switch(I->getOpcode()) {
case Mips::LOAD_CCOND_DSP:
- case Mips::LOAD_CCOND_DSP_P8:
expandLoadCCond(MBB, I);
break;
case Mips::STORE_CCOND_DSP:
- case Mips::STORE_CCOND_DSP_P8:
expandStoreCCond(MBB, I);
break;
- case Mips::LOAD_AC64:
- case Mips::LOAD_AC64_P8:
- case Mips::LOAD_AC_DSP:
- case Mips::LOAD_AC_DSP_P8:
+ case Mips::LOAD_ACC64:
+ case Mips::LOAD_ACC64DSP:
expandLoadACC(MBB, I, 4);
break;
- case Mips::LOAD_AC128:
- case Mips::LOAD_AC128_P8:
+ case Mips::LOAD_ACC128:
expandLoadACC(MBB, I, 8);
break;
- case Mips::STORE_AC64:
- case Mips::STORE_AC64_P8:
- case Mips::STORE_AC_DSP:
- case Mips::STORE_AC_DSP_P8:
- expandStoreACC(MBB, I, 4);
+ case Mips::STORE_ACC64:
+ expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
+ break;
+ case Mips::STORE_ACC64DSP:
+ expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
break;
- case Mips::STORE_AC128:
- case Mips::STORE_AC128_P8:
- expandStoreACC(MBB, I, 8);
+ case Mips::STORE_ACC128:
+ expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
break;
case TargetOpcode::COPY:
if (!expandCopy(MBB, I))
@@ -119,6 +125,11 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -134,6 +145,11 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(4);
unsigned VR = MRI.createVirtualRegister(RC);
unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
@@ -152,6 +168,11 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@@ -168,62 +189,69 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
}
void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
+ unsigned MFHiOpc, unsigned MFLoOpc,
unsigned RegSize) {
- // copy $vr0, lo
+ // mflo $vr0, src
// store $vr0, FI
- // copy $vr1, hi
+ // mfhi $vr1, src
// store $vr1, FI + 4
assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
- unsigned Lo = RegInfo.getSubReg(Src, Mips::sub_lo);
- unsigned Hi = RegInfo.getSubReg(Src, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(Lo, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(Hi, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
- unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
+ unsigned Src = I->getOperand(1).getReg();
+ std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);
- if (Mips::ACRegsDSPRegClass.contains(Dst, Src))
- return expandCopyACC(MBB, I, Dst, Src, 4);
-
- if (Mips::ACRegs128RegClass.contains(Dst, Src))
- return expandCopyACC(MBB, I, Dst, Src, 8);
+ if (!Opcodes.first)
+ return false;
- return false;
+ return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}
-bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned Dst,
- unsigned Src, unsigned RegSize) {
- // copy $vr0, src_lo
+bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
+ unsigned MFHiOpc, unsigned MFLoOpc) {
+ // mflo $vr0, src
// copy dst_lo, $vr0
- // copy $vr1, src_hi
+ // mfhi $vr1, src
// copy dst_hi, $vr1
- const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
+ unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
+ unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
+ const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
- unsigned SrcLo = RegInfo.getSubReg(Src, Mips::sub_lo);
- unsigned SrcHi = RegInfo.getSubReg(Src, Mips::sub_hi);
DebugLoc DL = I->getDebugLoc();
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR0).addReg(SrcLo, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
.addReg(VR0, RegState::Kill);
- BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), VR1).addReg(SrcHi, SrcKill);
+ BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
.addReg(VR1, RegState::Kill);
return true;
@@ -244,10 +272,12 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const MipsRegisterInfo *RegInfo =
- static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
MachineBasicBlock::iterator MBBI = MBB.begin();
DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
@@ -262,7 +292,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -272,9 +302,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
- DstML = MachineLocation(MachineLocation::VirtualFP);
- SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -298,35 +327,36 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
// If Reg is a double precision register, emit two cfa_offsets,
// one for each of the paired single precision registers.
if (Mips::AFGR64RegClass.contains(Reg)) {
- MachineLocation DstML0(MachineLocation::VirtualFP, Offset);
- MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4);
- MachineLocation SrcML0(RegInfo->getSubReg(Reg, Mips::sub_fpeven));
- MachineLocation SrcML1(RegInfo->getSubReg(Reg, Mips::sub_fpodd));
+ unsigned Reg0 =
+ MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
+ unsigned Reg1 =
+ MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);
if (!STI.isLittle())
- std::swap(SrcML0, SrcML1);
+ std::swap(Reg0, Reg1);
- Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0));
- Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1));
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(CSLabel, Reg0, Offset));
+ MMI.addFrameInst(
+ MCCFIInstruction::createOffset(CSLabel, Reg1, Offset + 4));
} else {
- // Reg is either in CPURegs or FGR32.
- DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
- SrcML = MachineLocation(Reg);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ // Reg is either in GPR32 or FGR32.
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ CSLabel, MRI->getDwarfRegNum(Reg, 1), Offset));
}
}
}
if (MipsFI->callsEhReturn()) {
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// Insert instructions that spill eh data registers.
for (int I = 0; I < 4; ++I) {
if (!MBB.isLiveIn(ehDataReg(I)))
MBB.addLiveIn(ehDataReg(I));
TII.storeRegToStackSlot(MBB, MBBI, ehDataReg(I), false,
- MipsFI->getEhDataRegFI(I), RC, RegInfo);
+ MipsFI->getEhDataRegFI(I), RC, &RegInfo);
}
// Emit .cfi_offset directives for eh data registers.
@@ -335,9 +365,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel2);
for (int I = 0; I < 4; ++I) {
int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
- DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
- SrcML = MachineLocation(ehDataReg(I));
- Moves.push_back(MachineMove(CSLabel2, DstML, SrcML));
+ unsigned Reg = MRI->getDwarfRegNum(ehDataReg(I), true);
+ MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel2, Reg, Offset));
}
}
@@ -350,9 +379,8 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
- DstML = MachineLocation(FP);
- SrcML = MachineLocation(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
+ SetFPLabel, MRI->getDwarfRegNum(FP, true)));
}
}
@@ -361,10 +389,12 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- const MipsRegisterInfo *RegInfo =
- static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo*>(MF.getTarget().getInstrInfo());
+ const MipsRegisterInfo &RegInfo =
+ *static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo());
+
DebugLoc dl = MBBI->getDebugLoc();
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP;
@@ -385,7 +415,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
if (MipsFI->callsEhReturn()) {
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// Find first instruction that restores a callee-saved register.
MachineBasicBlock::iterator I = MBBI;
@@ -395,7 +425,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
// Insert instructions that restore eh data registers.
for (int J = 0; J < 4; ++J) {
TII.loadRegFromStackSlot(MBB, I, ehDataReg(J), MipsFI->getEhDataRegFI(J),
- RC, RegInfo);
+ RC, &RegInfo);
}
}
@@ -493,7 +523,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
  // The spill slot should be half the size of the accumulator. If the target
  // is mips64, it should be 64-bit; otherwise it should be 32-bit.
const TargetRegisterClass *RC = STI.hasMips64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
RS->addScavengingFrameIndex(FI);
@@ -507,7 +537,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
return;
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
RC->getAlignment(), false);
RS->addScavengingFrameIndex(FI);
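
The prologue code in this file now records unwind information through MachineModuleInfo::addFrameInst() rather than the old MachineMove list. For reference, the three MC helpers it uses correspond to assembler directives roughly as follows; the labels and register numbers here are assumed placeholders:

  // Emits .cfi_def_cfa_offset StackSize (the helper negates its argument).
  MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(AdjustSPLabel, -StackSize));
  // Emits .cfi_offset <DwarfReg>, <Offset> for a callee-saved register.
  MMI.addFrameInst(MCCFIInstruction::createOffset(CSLabel, DwarfReg, Offset));
  // Emits .cfi_def_cfa_register <DwarfFP> once the frame pointer is set up.
  MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(SetFPLabel, DwarfFP));
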
diff --git a/lib/Target/Mips/MipsSEFrameLowering.h b/lib/Target/Mips/MipsSEFrameLowering.h
index 193a66cc65a7..8fa9e469887d 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.h
+++ b/lib/Target/Mips/MipsSEFrameLowering.h
@@ -21,7 +21,7 @@ namespace llvm {
class MipsSEFrameLowering : public MipsFrameLowering {
public:
explicit MipsSEFrameLowering(const MipsSubtarget &STI)
- : MipsFrameLowering(STI, STI.hasMips64() ? 16 : 8) {}
+ : MipsFrameLowering(STI, STI.stackAlignment()) {}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
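
With the constructor change above, the frame alignment comes from the subtarget rather than a hasMips64() check, so frame-size rounding follows the ABI automatically. A one-line sketch (fragment; MFI is the current function's frame info):

  // Illustrative fragment: round the frame to the subtarget-provided alignment.
  uint64_t StackSize = RoundUpToAlignment(MFI->getStackSize(), getStackAlignment());
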
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
index 8a6523a5d424..737660ec876c 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -66,6 +66,21 @@ void MipsSEDAGToDAGISel::addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MIB.addReg(Mips::DSPEFI, Flag);
}
+unsigned MipsSEDAGToDAGISel::getMSACtrlReg(const SDValue RegIdx) const {
+ switch (cast<ConstantSDNode>(RegIdx)->getZExtValue()) {
+ default:
+ llvm_unreachable("Could not map int to register");
+ case 0: return Mips::MSAIR;
+ case 1: return Mips::MSACSR;
+ case 2: return Mips::MSAAccess;
+ case 3: return Mips::MSASave;
+ case 4: return Mips::MSAModify;
+ case 5: return Mips::MSARequest;
+ case 6: return Mips::MSAMap;
+ case 7: return Mips::MSAUnmap;
+ }
+}
+
bool MipsSEDAGToDAGISel::replaceUsesWithZeroReg(MachineRegisterInfo *MRI,
const MachineInstr& MI) {
unsigned DstReg = 0, ZeroReg = 0;
@@ -119,9 +134,9 @@ void MipsSEDAGToDAGISel::initGlobalBaseReg(MachineFunction &MF) {
const TargetRegisterClass *RC;
if (Subtarget.isABI_N64())
- RC = (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
+ RC = (const TargetRegisterClass*)&Mips::GPR64RegClass;
else
- RC = (const TargetRegisterClass*)&Mips::CPURegsRegClass;
+ RC = (const TargetRegisterClass*)&Mips::GPR32RegClass;
V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
@@ -214,7 +229,7 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) {
}
SDNode *MipsSEDAGToDAGISel::selectAddESubE(unsigned MOp, SDValue InFlag,
- SDValue CmpLHS, DebugLoc DL,
+ SDValue CmpLHS, SDLoc DL,
SDNode *Node) const {
unsigned Opc = InFlag.getOpcode(); (void)Opc;
@@ -301,6 +316,20 @@ bool MipsSEDAGToDAGISel::selectAddrRegImm(SDValue Addr, SDValue &Base,
return false;
}
+/// ComplexPattern used on MipsInstrInfo
+/// Used on Mips Load/Store instructions
+bool MipsSEDAGToDAGISel::selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ // Operand is a result from an ADD.
+ if (Addr.getOpcode() == ISD::ADD) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1);
+ return true;
+ }
+
+ return false;
+}
+
bool MipsSEDAGToDAGISel::selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const {
Base = Addr;
@@ -314,9 +343,266 @@ bool MipsSEDAGToDAGISel::selectIntAddr(SDValue Addr, SDValue &Base,
selectAddrDefault(Addr, Base, Offset);
}
+/// Used on microMIPS Load/Store unaligned instructions (12-bit offset)
+bool MipsSEDAGToDAGISel::selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ EVT ValTy = Addr.getValueType();
+
+ // Addresses of the form FI+const or FI|const
+ if (CurDAG->isBaseWithConstantOffset(Addr)) {
+ ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
+ if (isInt<12>(CN->getSExtValue())) {
+
+ // If the first operand is a FI then get the TargetFI Node
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
+ (Addr.getOperand(0)))
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy);
+ else
+ Base = Addr.getOperand(0);
+
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool MipsSEDAGToDAGISel::selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const {
+ return selectAddrRegImm12(Addr, Base, Offset) ||
+ selectAddrDefault(Addr, Base, Offset);
+}
+
+// Select constant vector splats.
+//
+// Returns true and sets Imm if:
+// * MSA is enabled
+// * N is an ISD::BUILD_VECTOR representing a constant splat
+bool MipsSEDAGToDAGISel::selectVSplat(SDNode *N, APInt &Imm) const {
+ if (!Subtarget.hasMSA())
+ return false;
+
+ BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N);
+
+ if (Node == NULL)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget.isLittle()))
+ return false;
+
+ Imm = SplatValue;
+
+ return true;
+}
+
+// Select constant vector splats.
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value fits in an integer with the specified signed-ness and
+// width.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+//
+// It's worth noting that this function is not used as part of the selection
+// of ldi.[bhwd] since it does not permit using the wrong-typed ldi.[bhwd]
+// instruction to achieve the desired bit pattern. ldi.[bhwd] is selected in
+// MipsSEDAGToDAGISel::selectNode.
+bool MipsSEDAGToDAGISel::
+selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
+ unsigned ImmBitSize) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat (N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ if (( Signed && ImmValue.isSignedIntN(ImmBitSize)) ||
+ (!Signed && ImmValue.isIntN(ImmBitSize))) {
+ Imm = CurDAG->getTargetConstant(ImmValue, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm1(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 1);
+}
+
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm2(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 2);
+}
+
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm3(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 3);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm4(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 4);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm5(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 5);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm6(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 6);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatUimm8(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, false, 8);
+}
+
+// Select constant vector splats.
+bool MipsSEDAGToDAGISel::
+selectVSplatSimm5(SDValue N, SDValue &Imm) const {
+ return selectVSplatCommon(N, Imm, true, 5);
+}
+
+// Select constant vector splats whose value is a power of 2.
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a power of two.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatUimmPow2(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat (N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ int32_t Log2 = ImmValue.exactLogBase2();
+
+ if (Log2 != -1) {
+ Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Select constant vector splats whose value only has a consecutive sequence
+// of left-most bits set (e.g. 0b11...1100...00).
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a consecutive sequence of left-most bits.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatMaskL(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ // Extract the run of set bits starting with bit zero from the bitwise
+ // inverse of ImmValue, and test that the inverse of this is the same
+ // as the original value.
+ if (ImmValue == ~(~ImmValue & ~(~ImmValue + 1))) {
+
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Select constant vector splats whose value only has a consecutive sequence
+// of right-most bits set (e.g. 0b00...0011...11).
+//
+// In addition to the requirements of selectVSplat(), this function returns
+// true and sets Imm if:
+// * The splat value is the same width as the elements of the vector
+// * The splat value is a consecutive sequence of right-most bits.
+//
+// This function looks through ISD::BITCAST nodes.
+// TODO: This might not be appropriate for big-endian MSA since BITCAST is
+// sometimes a shuffle in big-endian mode.
+bool MipsSEDAGToDAGISel::selectVSplatMaskR(SDValue N, SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ // Extract the run of set bits starting with bit zero, and test that the
+ // result is the same as the original value
+ if (ImmValue == (ImmValue & ~(ImmValue + 1))) {
+ Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool MipsSEDAGToDAGISel::selectVSplatUimmInvPow2(SDValue N,
+ SDValue &Imm) const {
+ APInt ImmValue;
+ EVT EltTy = N->getValueType(0).getVectorElementType();
+
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ if (selectVSplat(N.getNode(), ImmValue) &&
+ ImmValue.getBitWidth() == EltTy.getSizeInBits()) {
+ int32_t Log2 = (~ImmValue).exactLogBase2();
+
+ if (Log2 != -1) {
+ Imm = CurDAG->getTargetConstant(Log2, EltTy);
+ return true;
+ }
+ }
+
+ return false;
+}
+
std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
unsigned Opcode = Node->getOpcode();
- DebugLoc DL = Node->getDebugLoc();
+ SDLoc DL(Node);
///
// Instruction Selection not handled by the auto-generated
@@ -348,6 +634,11 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO_64, MVT::i64);
Result = CurDAG->getMachineNode(Mips::DMTC1, DL, MVT::f64, Zero);
+ } else if (Subtarget.isFP64bit()) {
+ SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+ Mips::ZERO, MVT::i32);
+ Result = CurDAG->getMachineNode(Mips::BuildPairF64_64, DL, MVT::f64,
+ Zero, Zero);
} else {
SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
Mips::ZERO, MVT::i32);
@@ -374,7 +665,7 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
AnalyzeImm.Analyze(Imm, Size, false);
MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
- DebugLoc DL = CN->getDebugLoc();
+ SDLoc DL(CN);
SDNode *RegOpnd;
SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd),
MVT::i64);
@@ -401,24 +692,71 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
return std::make_pair(true, RegOpnd);
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ switch (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_cfcmsa: {
+ SDValue ChainIn = Node->getOperand(0);
+ SDValue RegIdx = Node->getOperand(2);
+ SDValue Reg = CurDAG->getCopyFromReg(ChainIn, DL,
+ getMSACtrlReg(RegIdx), MVT::i32);
+ return std::make_pair(true, Reg.getNode());
+ }
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_WO_CHAIN: {
+ switch (cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_move_v:
+ // Like an assignment but will always produce a move.v even if
+ // unnecessary.
+ return std::make_pair(true,
+ CurDAG->getMachineNode(Mips::MOVE_V, DL,
+ Node->getValueType(0),
+ Node->getOperand(1)));
+ }
+ break;
+ }
+
+ case ISD::INTRINSIC_VOID: {
+ switch (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
+ default:
+ break;
+
+ case Intrinsic::mips_ctcmsa: {
+ SDValue ChainIn = Node->getOperand(0);
+ SDValue RegIdx = Node->getOperand(2);
+ SDValue Value = Node->getOperand(3);
+ SDValue ChainOut = CurDAG->getCopyToReg(ChainIn, DL,
+ getMSACtrlReg(RegIdx), Value);
+ return std::make_pair(true, ChainOut.getNode());
+ }
+ }
+ break;
+ }
+
case MipsISD::ThreadPointer: {
- EVT PtrVT = TLI.getPointerTy();
- unsigned RdhwrOpc, SrcReg, DestReg;
+ EVT PtrVT = getTargetLowering()->getPointerTy();
+ unsigned RdhwrOpc, DestReg;
if (PtrVT == MVT::i32) {
RdhwrOpc = Mips::RDHWR;
- SrcReg = Mips::HWR29;
DestReg = Mips::V1;
} else {
RdhwrOpc = Mips::RDHWR64;
- SrcReg = Mips::HWR29_64;
DestReg = Mips::V1_64;
}
SDNode *Rdhwr =
- CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(),
+ CurDAG->getMachineNode(RdhwrOpc, SDLoc(Node),
Node->getValueType(0),
- CurDAG->getRegister(SrcReg, PtrVT));
+ CurDAG->getRegister(Mips::HWR29, MVT::i32));
SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, DestReg,
SDValue(Rdhwr, 0));
SDValue ResNode = CurDAG->getCopyFromReg(Chain, DL, DestReg, PtrVT);
@@ -426,18 +764,81 @@ std::pair<bool, SDNode*> MipsSEDAGToDAGISel::selectNode(SDNode *Node) {
return std::make_pair(true, ResNode.getNode());
}
- case MipsISD::InsertLOHI: {
- unsigned RCID = Subtarget.hasDSP() ? Mips::ACRegsDSPRegClassID :
- Mips::ACRegsRegClassID;
- SDValue RegClass = CurDAG->getTargetConstant(RCID, MVT::i32);
- SDValue LoIdx = CurDAG->getTargetConstant(Mips::sub_lo, MVT::i32);
- SDValue HiIdx = CurDAG->getTargetConstant(Mips::sub_hi, MVT::i32);
- const SDValue Ops[] = { RegClass, Node->getOperand(0), LoIdx,
- Node->getOperand(1), HiIdx };
- SDNode *Res = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
- MVT::Untyped, Ops);
+ case ISD::BUILD_VECTOR: {
+ // Select appropriate ldi.[bhwd] instructions for constant splats of
+ // 128-bit when MSA is enabled. Fixup any register class mismatches that
+ // occur as a result.
+ //
+ // This allows the compiler to use a wider range of immediates than would
+ // otherwise be allowed. If, for example, v4i32 could only use ldi.h then
+ // it would not be possible to load { 0x01010101, 0x01010101, 0x01010101,
+ // 0x01010101 } without using a constant pool. This would be sub-optimal
+ // when // 'ldi.b wd, 1' is capable of producing that bit-pattern in the
+ // same set/ of registers. Similarly, ldi.h isn't capable of producing {
+ // 0x00000000, 0x00000001, 0x00000000, 0x00000001 } but 'ldi.d wd, 1' can.
+
+ BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Node);
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+ unsigned LdiOp;
+ EVT ResVecTy = BVN->getValueType(0);
+ EVT ViaVecTy;
+
+ if (!Subtarget.hasMSA() || !BVN->getValueType(0).is128BitVector())
+ return std::make_pair(false, (SDNode*)NULL);
+
+ if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget.isLittle()))
+ return std::make_pair(false, (SDNode*)NULL);
+
+ switch (SplatBitSize) {
+ default:
+ return std::make_pair(false, (SDNode*)NULL);
+ case 8:
+ LdiOp = Mips::LDI_B;
+ ViaVecTy = MVT::v16i8;
+ break;
+ case 16:
+ LdiOp = Mips::LDI_H;
+ ViaVecTy = MVT::v8i16;
+ break;
+ case 32:
+ LdiOp = Mips::LDI_W;
+ ViaVecTy = MVT::v4i32;
+ break;
+ case 64:
+ LdiOp = Mips::LDI_D;
+ ViaVecTy = MVT::v2i64;
+ break;
+ }
+
+ if (!SplatValue.isSignedIntN(10))
+ return std::make_pair(false, (SDNode*)NULL);
+
+ SDValue Imm = CurDAG->getTargetConstant(SplatValue,
+ ViaVecTy.getVectorElementType());
+
+ SDNode *Res = CurDAG->getMachineNode(LdiOp, SDLoc(Node), ViaVecTy, Imm);
+
+ if (ResVecTy != ViaVecTy) {
+ // If LdiOp is writing to a different register class to ResVecTy, then
+ // fix it up here. This COPY_TO_REGCLASS should never cause a move.v
+ // since the source and destination register sets contain the same
+ // registers.
+ const TargetLowering *TLI = getTargetLowering();
+ MVT ResVecTySimple = ResVecTy.getSimpleVT();
+ const TargetRegisterClass *RC = TLI->getRegClassFor(ResVecTySimple);
+ Res = CurDAG->getMachineNode(Mips::COPY_TO_REGCLASS, SDLoc(Node),
+ ResVecTy, SDValue(Res, 0),
+ CurDAG->getTargetConstant(RC->getID(),
+ MVT::i32));
+ }
+
return std::make_pair(true, Res);
}
+
}
return std::make_pair(false, (SDNode*)NULL);
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index a235e96b96a1..dc52064c9830 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -30,23 +30,67 @@ private:
void addDSPCtrlRegOperands(bool IsDef, MachineInstr &MI,
MachineFunction &MF);
+ unsigned getMSACtrlReg(const SDValue RegIdx) const;
+
bool replaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&);
- std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, DebugLoc dl,
+ std::pair<SDNode*, SDNode*> selectMULT(SDNode *N, unsigned Opc, SDLoc dl,
EVT Ty, bool HasLo, bool HasHi);
SDNode *selectAddESubE(unsigned MOp, SDValue InFlag, SDValue CmpLHS,
- DebugLoc DL, SDNode *Node) const;
+ SDLoc DL, SDNode *Node) const;
virtual bool selectAddrRegImm(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectAddrRegReg(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
virtual bool selectAddrDefault(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
virtual bool selectIntAddr(SDValue Addr, SDValue &Base,
SDValue &Offset) const;
+ virtual bool selectAddrRegImm12(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ virtual bool selectIntAddrMM(SDValue Addr, SDValue &Base,
+ SDValue &Offset) const;
+
+ /// \brief Select constant vector splats.
+ virtual bool selectVSplat(SDNode *N, APInt &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a given integer.
+ virtual bool selectVSplatCommon(SDValue N, SDValue &Imm, bool Signed,
+ unsigned ImmBitSize) const;
+ /// \brief Select constant vector splats whose value fits in a uimm1.
+ virtual bool selectVSplatUimm1(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm2.
+ virtual bool selectVSplatUimm2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm3.
+ virtual bool selectVSplatUimm3(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm4.
+ virtual bool selectVSplatUimm4(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm5.
+ virtual bool selectVSplatUimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm6.
+ virtual bool selectVSplatUimm6(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a uimm8.
+ virtual bool selectVSplatUimm8(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value fits in a simm5.
+ virtual bool selectVSplatSimm5(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a power of 2.
+ virtual bool selectVSplatUimmPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is the inverse of a
+ /// power of 2.
+ virtual bool selectVSplatUimmInvPow2(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+  /// ending at the most significant bit.
+ virtual bool selectVSplatMaskL(SDValue N, SDValue &Imm) const;
+ /// \brief Select constant vector splats whose value is a run of set bits
+ /// starting at bit zero.
+ virtual bool selectVSplatMaskR(SDValue N, SDValue &Imm) const;
+
virtual std::pair<bool, SDNode*> selectNode(SDNode *Node);
virtual void processFunctionAfterISel(MachineFunction &MF);
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index 8544bb891073..809adc03b151 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -10,6 +10,7 @@
// Subclass of MipsTargetLowering specialized for mips32/64.
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "mips-isel"
#include "MipsSEISelLowering.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
@@ -17,6 +18,8 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
@@ -25,22 +28,40 @@ static cl::opt<bool>
EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
cl::desc("MIPS: Enable tail calls."), cl::init(false));
+static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
+ cl::desc("Expand double precision loads and "
+ "stores to their single precision "
+ "counterparts"));
+
MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
: MipsTargetLowering(TM) {
// Set up the register classes
+ addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
- clearRegisterClasses();
+ if (HasMips64)
+ addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
- addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
+ if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
+ // Expand all truncating stores and extending loads.
+ unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
- if (HasMips64)
- addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass);
+ for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
+ for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
+ setTruncStoreAction((MVT::SimpleValueType)VT0,
+ (MVT::SimpleValueType)VT1, Expand);
+
+ setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
+ }
+ }
if (Subtarget->hasDSP()) {
MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
- addRegisterClass(VecTys[i], &Mips::DSPRegsRegClass);
+ addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
// Expand all builtin opcodes.
for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
@@ -63,12 +84,28 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
if (Subtarget->hasDSPR2())
setOperationAction(ISD::MUL, MVT::v2i16, Legal);
- if (!TM.Options.UseSoftFloat) {
+ if (Subtarget->hasMSA()) {
+ addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
+ addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
+ addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
+ addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
+ addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
+ addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
+ addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
+
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::OR);
+ setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::VSELECT);
+ setTargetDAGCombine(ISD::XOR);
+ }
+
+ if (!Subtarget->mipsSEUsesSoftFloat()) {
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
// When dealing with single precision only, use libcalls
if (!Subtarget->isSingleFloat()) {
- if (HasMips64)
+ if (Subtarget->isFP64bit())
addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
else
addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
@@ -99,6 +136,16 @@ MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
setTargetDAGCombine(ISD::ADDE);
setTargetDAGCombine(ISD::SUBE);
+ setTargetDAGCombine(ISD::MUL);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+ setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+
+ if (NoDPLoadStore) {
+ setOperationAction(ISD::LOAD, MVT::f64, Custom);
+ setOperationAction(ISD::STORE, MVT::f64, Custom);
+ }
computeRegisterProperties();
}
@@ -108,6 +155,93 @@ llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
return new MipsSETargetLowering(TM);
}
+// Enable MSA support for the given integer type and Register class.
+void MipsSETargetLowering::
+addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
+ addRegisterClass(Ty, RC);
+
+ // Expand all builtin opcodes.
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
+ setOperationAction(Opc, Ty, Expand);
+
+ setOperationAction(ISD::BITCAST, Ty, Legal);
+ setOperationAction(ISD::LOAD, Ty, Legal);
+ setOperationAction(ISD::STORE, Ty, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
+
+ setOperationAction(ISD::ADD, Ty, Legal);
+ setOperationAction(ISD::AND, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::MUL, Ty, Legal);
+ setOperationAction(ISD::OR, Ty, Legal);
+ setOperationAction(ISD::SDIV, Ty, Legal);
+ setOperationAction(ISD::SREM, Ty, Legal);
+ setOperationAction(ISD::SHL, Ty, Legal);
+ setOperationAction(ISD::SRA, Ty, Legal);
+ setOperationAction(ISD::SRL, Ty, Legal);
+ setOperationAction(ISD::SUB, Ty, Legal);
+ setOperationAction(ISD::UDIV, Ty, Legal);
+ setOperationAction(ISD::UREM, Ty, Legal);
+ setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
+ setOperationAction(ISD::VSELECT, Ty, Legal);
+ setOperationAction(ISD::XOR, Ty, Legal);
+
+ if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {
+ setOperationAction(ISD::FP_TO_SINT, Ty, Legal);
+ setOperationAction(ISD::FP_TO_UINT, Ty, Legal);
+ setOperationAction(ISD::SINT_TO_FP, Ty, Legal);
+ setOperationAction(ISD::UINT_TO_FP, Ty, Legal);
+ }
+
+ setOperationAction(ISD::SETCC, Ty, Legal);
+ setCondCodeAction(ISD::SETNE, Ty, Expand);
+ setCondCodeAction(ISD::SETGE, Ty, Expand);
+ setCondCodeAction(ISD::SETGT, Ty, Expand);
+ setCondCodeAction(ISD::SETUGE, Ty, Expand);
+ setCondCodeAction(ISD::SETUGT, Ty, Expand);
+}
+
+// Enable MSA support for the given floating-point type and Register class.
+void MipsSETargetLowering::
+addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
+ addRegisterClass(Ty, RC);
+
+ // Expand all builtin opcodes.
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
+ setOperationAction(Opc, Ty, Expand);
+
+ setOperationAction(ISD::LOAD, Ty, Legal);
+ setOperationAction(ISD::STORE, Ty, Legal);
+ setOperationAction(ISD::BITCAST, Ty, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
+
+ if (Ty != MVT::v8f16) {
+ setOperationAction(ISD::FABS, Ty, Legal);
+ setOperationAction(ISD::FADD, Ty, Legal);
+ setOperationAction(ISD::FDIV, Ty, Legal);
+ setOperationAction(ISD::FEXP2, Ty, Legal);
+ setOperationAction(ISD::FLOG2, Ty, Legal);
+ setOperationAction(ISD::FMA, Ty, Legal);
+ setOperationAction(ISD::FMUL, Ty, Legal);
+ setOperationAction(ISD::FRINT, Ty, Legal);
+ setOperationAction(ISD::FSQRT, Ty, Legal);
+ setOperationAction(ISD::FSUB, Ty, Legal);
+ setOperationAction(ISD::VSELECT, Ty, Legal);
+
+ setOperationAction(ISD::SETCC, Ty, Legal);
+ setCondCodeAction(ISD::SETOGE, Ty, Expand);
+ setCondCodeAction(ISD::SETOGT, Ty, Expand);
+ setCondCodeAction(ISD::SETUGE, Ty, Expand);
+ setCondCodeAction(ISD::SETUGT, Ty, Expand);
+ setCondCodeAction(ISD::SETGE, Ty, Expand);
+ setCondCodeAction(ISD::SETGT, Ty, Expand);
+ }
+}
bool
MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
@@ -127,6 +261,8 @@ MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const {
switch(Op.getOpcode()) {
+ case ISD::LOAD: return lowerLOAD(Op, DAG);
+ case ISD::STORE: return lowerSTORE(Op, DAG);
case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
case ISD::MULHS: return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
@@ -137,6 +273,10 @@ SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
DAG);
case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_W_CHAIN: return lowerINTRINSIC_W_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_VOID: return lowerINTRINSIC_VOID(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::BUILD_VECTOR: return lowerBUILD_VECTOR(Op, DAG);
+ case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, DAG);
}
return MipsTargetLowering::LowerOperation(Op, DAG);
@@ -186,10 +326,10 @@ static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
return false;
- DebugLoc DL = ADDENode->getDebugLoc();
+ SDLoc DL(ADDENode);
// Initialize accumulator.
- SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
+ SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
ADDCNode->getOperand(1),
ADDENode->getOperand(1));
@@ -203,15 +343,11 @@ static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
// replace uses of adde and addc here
if (!SDValue(ADDCNode, 0).use_empty()) {
- SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
- SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
- LoIdx);
+ SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
}
if (!SDValue(ADDENode, 0).use_empty()) {
- SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
- SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
- HiIdx);
+ SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
}
@@ -262,10 +398,10 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
return false;
- DebugLoc DL = SUBENode->getDebugLoc();
+ SDLoc DL(SUBENode);
// Initialize accumulator.
- SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
+ SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
SUBCNode->getOperand(0),
SUBENode->getOperand(0));
@@ -279,15 +415,11 @@ static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
// replace uses of sube and subc here
if (!SDValue(SUBCNode, 0).use_empty()) {
- SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
- SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
- LoIdx);
+ SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MSub);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
}
if (!SDValue(SUBENode, 0).use_empty()) {
- SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
- SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
- HiIdx);
+ SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MSub);
CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
}
@@ -307,6 +439,248 @@ static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
+//
+// Performs the following transformations:
+// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
+// sign/zero-extension is completely overwritten by the new one performed by
+// the ISD::AND.
+// - Removes redundant zero extensions performed by an ISD::AND.
+static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget *Subtarget) {
+ if (!Subtarget->hasMSA())
+ return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ unsigned Op0Opcode = Op0->getOpcode();
+
+ // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
+ // where $d + 1 == 2^n and n == 32
+ // or $d + 1 == 2^n and n <= 32 and ZExt
+ // -> (MipsVExtractZExt $a, $b, $c)
+ if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
+ Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
+ ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
+
+ if (!Mask)
+ return SDValue();
+
+ int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
+
+ if (Log2IfPositive <= 0)
+ return SDValue(); // Mask+1 is not a power of 2
+
+ SDValue Op0Op2 = Op0->getOperand(2);
+ EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
+ unsigned ExtendTySize = ExtendTy.getSizeInBits();
+ unsigned Log2 = Log2IfPositive;
+
+ if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
+ Log2 == ExtendTySize) {
+ SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
+ DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
+ Op0->getVTList(), Ops, Op0->getNumOperands());
+ return Op0;
+ }
+ }
+
+ return SDValue();
+}
+
+// Determine if the specified node is a constant vector splat.
+//
+// Returns true and sets Imm if:
+// * N is a ISD::BUILD_VECTOR representing a constant splat
+//
+// This function is quite similar to MipsSEDAGToDAGISel::selectVSplat. The
+// differences are that it assumes MSA support has already been checked and
+// that the arbitrary requirement for a maximum of 32-bit integers isn't
+// applied (and must not be, in order for binsri.d to be selectable).
+static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) {
+ BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N.getNode());
+
+ if (Node == NULL)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
+ 8, !IsLittleEndian))
+ return false;
+
+ Imm = SplatValue;
+
+ return true;
+}
+
+// Test whether the given node is an all-ones build_vector.
+static bool isVectorAllOnes(SDValue N) {
+ // Look through bitcasts. Endianness doesn't matter because we are looking
+ // for an all-ones value.
+ if (N->getOpcode() == ISD::BITCAST)
+ N = N->getOperand(0);
+
+ BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
+
+ if (!BVN)
+ return false;
+
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ // Endianness doesn't matter in this context because we are looking for
+ // an all-ones value.
+ if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))
+ return SplatValue.isAllOnesValue();
+
+ return false;
+}
+
+// Test whether N is the bitwise inverse of OfNode.
+static bool isBitwiseInverse(SDValue N, SDValue OfNode) {
+ if (N->getOpcode() != ISD::XOR)
+ return false;
+
+ if (isVectorAllOnes(N->getOperand(0)))
+ return N->getOperand(1) == OfNode;
+
+ if (isVectorAllOnes(N->getOperand(1)))
+ return N->getOperand(0) == OfNode;
+
+ return false;
+}
+
+// Perform combines where ISD::OR is the root node.
+//
+// Performs the following transformations:
+// - (or (and $a, $mask), (and $b, $inv_mask)) => (vselect $mask, $a, $b)
+// where $inv_mask is the bitwise inverse of $mask and the 'or' has a 128-bit
+// vector type.
+static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSubtarget *Subtarget) {
+ if (!Subtarget->hasMSA())
+ return SDValue();
+
+ EVT Ty = N->getValueType(0);
+
+ if (!Ty.is128BitVector())
+ return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ if (Op0->getOpcode() == ISD::AND && Op1->getOpcode() == ISD::AND) {
+ SDValue Op0Op0 = Op0->getOperand(0);
+ SDValue Op0Op1 = Op0->getOperand(1);
+ SDValue Op1Op0 = Op1->getOperand(0);
+ SDValue Op1Op1 = Op1->getOperand(1);
+ bool IsLittleEndian = !Subtarget->isLittle();
+
+ SDValue IfSet, IfClr, Cond;
+ bool IsConstantMask = false;
+ APInt Mask, InvMask;
+
+    // If Op0Op0 is an appropriate mask, try to find its inverse in either
+    // Op1Op0 or Op1Op1. Keep track of the Cond, IfSet, and IfClr nodes while
+    // looking.
+ // IfClr will be set if we find a valid match.
+ if (isVSplat(Op0Op0, Mask, IsLittleEndian)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+
+ if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op1;
+ else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op0;
+
+ IsConstantMask = true;
+ }
+
+ // If IfClr is not yet set, and Op0Op1 is an appropriate mask, try the same
+ // thing again using this mask.
+ // IfClr will be set if we find a valid match.
+ if (!IfClr.getNode() && isVSplat(Op0Op1, Mask, IsLittleEndian)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+
+ if (isVSplat(Op1Op0, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op1;
+ else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) &&
+ Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask)
+ IfClr = Op1Op0;
+
+ IsConstantMask = true;
+ }
+
+ // If IfClr is not yet set, try looking for a non-constant match.
+ // IfClr will be set if we find a valid match amongst the eight
+ // possibilities.
+ if (!IfClr.getNode()) {
+ if (isBitwiseInverse(Op0Op0, Op1Op0)) {
+ Cond = Op1Op0;
+ IfSet = Op1Op1;
+ IfClr = Op0Op1;
+ } else if (isBitwiseInverse(Op0Op1, Op1Op0)) {
+ Cond = Op1Op0;
+ IfSet = Op1Op1;
+ IfClr = Op0Op0;
+ } else if (isBitwiseInverse(Op0Op0, Op1Op1)) {
+ Cond = Op1Op1;
+ IfSet = Op1Op0;
+ IfClr = Op0Op1;
+ } else if (isBitwiseInverse(Op0Op1, Op1Op1)) {
+ Cond = Op1Op1;
+ IfSet = Op1Op0;
+ IfClr = Op0Op0;
+ } else if (isBitwiseInverse(Op1Op0, Op0Op0)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+ IfClr = Op1Op1;
+ } else if (isBitwiseInverse(Op1Op1, Op0Op0)) {
+ Cond = Op0Op0;
+ IfSet = Op0Op1;
+ IfClr = Op1Op0;
+ } else if (isBitwiseInverse(Op1Op0, Op0Op1)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+ IfClr = Op1Op1;
+ } else if (isBitwiseInverse(Op1Op1, Op0Op1)) {
+ Cond = Op0Op1;
+ IfSet = Op0Op0;
+ IfClr = Op1Op0;
+ }
+ }
+
+ // At this point, IfClr will be set if we have a valid match.
+ if (!IfClr.getNode())
+ return SDValue();
+
+ assert(Cond.getNode() && IfSet.getNode());
+
+ // Fold degenerate cases.
+ if (IsConstantMask) {
+ if (Mask.isAllOnesValue())
+ return IfSet;
+ else if (Mask == 0)
+ return IfClr;
+ }
+
+ // Transform the DAG into an equivalent VSELECT.
+ return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfClr, IfSet);
+ }
+
+ return SDValue();
+}
+
static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget *Subtarget) {
@@ -320,6 +694,57 @@ static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
+ EVT ShiftTy, SelectionDAG &DAG) {
+ // Clear the upper (64 - VT.sizeInBits) bits.
+ C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
+
+ // Return 0.
+ if (C == 0)
+ return DAG.getConstant(0, VT);
+
+ // Return x.
+ if (C == 1)
+ return X;
+
+ // If c is power of 2, return (shl x, log2(c)).
+ if (isPowerOf2_64(C))
+ return DAG.getNode(ISD::SHL, DL, VT, X,
+ DAG.getConstant(Log2_64(C), ShiftTy));
+
+ unsigned Log2Ceil = Log2_64_Ceil(C);
+ uint64_t Floor = 1LL << Log2_64(C);
+ uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
+
+ // If |c - floor_c| <= |c - ceil_c|,
+ // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
+ // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
+ if (C - Floor <= Ceil - C) {
+ SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
+ SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
+ return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
+ }
+
+ // If |c - floor_c| > |c - ceil_c|,
+ // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
+ SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
+ SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
+ return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
+}
+
+static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
+ const TargetLowering::DAGCombinerInfo &DCI,
+ const MipsSETargetLowering *TL) {
+ EVT VT = N->getValueType(0);
+
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ if (!VT.isVector())
+ return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
+ VT, TL->getScalarShiftAmountTy(VT), DAG);
+
+ return SDValue(N, 0);
+}
+
static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
SelectionDAG &DAG,
const MipsSubtarget *Subtarget) {
@@ -330,6 +755,9 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
+ if (!Subtarget->hasDSP())
+ return SDValue();
+
if (!BV ||
!BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
EltSize, !Subtarget->isLittle()) ||
@@ -337,7 +765,7 @@ static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
(SplatValue.getZExtValue() >= EltSize))
return SDValue();
- return DAG.getNode(Opc, N->getDebugLoc(), Ty, N->getOperand(0),
+ return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
}
@@ -352,11 +780,57 @@ static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
}
+// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
+// constant splats into MipsISD::SHRA_DSP for DSPr2.
+//
+// Performs the following transformations:
+// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
+// sign/zero-extension is completely overwritten by the new one performed by
+// the ISD::SRA and ISD::SHL nodes.
+// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
+// sequence.
+//
+// See performDSPShiftCombine for more information about the transformation
+// used for DSPr2.
static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget *Subtarget) {
EVT Ty = N->getValueType(0);
+ if (Subtarget->hasMSA()) {
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+
+ // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
+ // where $d + sizeof($c) == 32
+ // or $d + sizeof($c) <= 32 and SExt
+ // -> (MipsVExtractSExt $a, $b, $c)
+ if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
+ SDValue Op0Op0 = Op0->getOperand(0);
+ ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
+
+ if (!ShAmount)
+ return SDValue();
+
+ if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
+ Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
+ return SDValue();
+
+ EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
+ unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
+
+ if (TotalBits == 32 ||
+ (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
+ TotalBits <= 32)) {
+ SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
+ Op0Op0->getOperand(2) };
+ DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
+ Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
+ return Op0Op0;
+ }
+ }
+ }
+
if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
return SDValue();
@@ -402,24 +876,91 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
return SDValue();
- return DAG.getNode(MipsISD::SETCC_DSP, N->getDebugLoc(), Ty, N->getOperand(0),
+ return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
N->getOperand(1), N->getOperand(2));
}
static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
EVT Ty = N->getValueType(0);
- if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
- return SDValue();
+ if (Ty.is128BitVector() && Ty.isInteger()) {
+ // Try the following combines:
+ // (vselect (setcc $a, $b, SETLT), $b, $a)) -> (vsmax $a, $b)
+ // (vselect (setcc $a, $b, SETLE), $b, $a)) -> (vsmax $a, $b)
+ // (vselect (setcc $a, $b, SETLT), $a, $b)) -> (vsmin $a, $b)
+ // (vselect (setcc $a, $b, SETLE), $a, $b)) -> (vsmin $a, $b)
+ // (vselect (setcc $a, $b, SETULT), $b, $a)) -> (vumax $a, $b)
+ // (vselect (setcc $a, $b, SETULE), $b, $a)) -> (vumax $a, $b)
+ // (vselect (setcc $a, $b, SETULT), $a, $b)) -> (vumin $a, $b)
+ // (vselect (setcc $a, $b, SETULE), $a, $b)) -> (vumin $a, $b)
+ // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
+ // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
+ // legalizer.
+ SDValue Op0 = N->getOperand(0);
+
+ if (Op0->getOpcode() != ISD::SETCC)
+ return SDValue();
+
+ ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
+ bool Signed;
+
+ if (CondCode == ISD::SETLT || CondCode == ISD::SETLE)
+ Signed = true;
+ else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
+ Signed = false;
+ else
+ return SDValue();
+
+ SDValue Op1 = N->getOperand(1);
+ SDValue Op2 = N->getOperand(2);
+ SDValue Op0Op0 = Op0->getOperand(0);
+ SDValue Op0Op1 = Op0->getOperand(1);
+
+ if (Op1 == Op0Op0 && Op2 == Op0Op1)
+ return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
+ Ty, Op1, Op2);
+ else if (Op1 == Op0Op1 && Op2 == Op0Op0)
+ return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
+ Ty, Op1, Op2);
+ } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
+ SDValue SetCC = N->getOperand(0);
+
+ if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
+ return SDValue();
+
+ return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
+ SetCC.getOperand(0), SetCC.getOperand(1),
+ N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
+ }
- SDValue SetCC = N->getOperand(0);
+ return SDValue();
+}
- if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
- return SDValue();
+static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
+ const MipsSubtarget *Subtarget) {
+ EVT Ty = N->getValueType(0);
- return DAG.getNode(MipsISD::SELECT_CC_DSP, N->getDebugLoc(), Ty,
- SetCC.getOperand(0), SetCC.getOperand(1), N->getOperand(1),
- N->getOperand(2), SetCC.getOperand(2));
+ if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
+ // Try the following combines:
+ // (xor (or $a, $b), (build_vector allones))
+ // (xor (or $a, $b), (bitcast (build_vector allones)))
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ SDValue NotOp;
+
+ if (ISD::isBuildVectorAllOnes(Op0.getNode()))
+ NotOp = Op1;
+ else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
+ NotOp = Op0;
+ else
+ return SDValue();
+
+ if (NotOp->getOpcode() == ISD::OR)
+ return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
+ NotOp->getOperand(1));
+ }
+
+ return SDValue();
}
SDValue
@@ -430,8 +971,16 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
switch (N->getOpcode()) {
case ISD::ADDE:
return performADDECombine(N, DAG, DCI, Subtarget);
+ case ISD::AND:
+ Val = performANDCombine(N, DAG, DCI, Subtarget);
+ break;
+ case ISD::OR:
+ Val = performORCombine(N, DAG, DCI, Subtarget);
+ break;
case ISD::SUBE:
return performSUBECombine(N, DAG, DCI, Subtarget);
+ case ISD::MUL:
+ return performMULCombine(N, DAG, DCI, this);
case ISD::SHL:
return performSHLCombine(N, DAG, DCI, Subtarget);
case ISD::SRA:
@@ -440,14 +989,22 @@ MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
return performSRLCombine(N, DAG, DCI, Subtarget);
case ISD::VSELECT:
return performVSELECTCombine(N, DAG);
- case ISD::SETCC: {
+ case ISD::XOR:
+ Val = performXORCombine(N, DAG, Subtarget);
+ break;
+ case ISD::SETCC:
Val = performSETCCCombine(N, DAG);
break;
}
- }
- if (Val.getNode())
+ if (Val.getNode()) {
+ DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
+ N->printrWithDepth(dbgs(), &DAG);
+ dbgs() << "\n=> \n";
+ Val.getNode()->printrWithDepth(dbgs(), &DAG);
+ dbgs() << "\n");
return Val;
+ }
return MipsTargetLowering::PerformDAGCombine(N, DCI);
}
@@ -460,6 +1017,42 @@ MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case Mips::BPOSGE32_PSEUDO:
return emitBPOSGE32(MI, BB);
+ case Mips::SNZ_B_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
+ case Mips::SNZ_H_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
+ case Mips::SNZ_W_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
+ case Mips::SNZ_D_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
+ case Mips::SNZ_V_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
+ case Mips::SZ_B_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
+ case Mips::SZ_H_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
+ case Mips::SZ_W_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
+ case Mips::SZ_D_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
+ case Mips::SZ_V_PSEUDO:
+ return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
+ case Mips::COPY_FW_PSEUDO:
+ return emitCOPY_FW(MI, BB);
+ case Mips::COPY_FD_PSEUDO:
+ return emitCOPY_FD(MI, BB);
+ case Mips::INSERT_FW_PSEUDO:
+ return emitINSERT_FW(MI, BB);
+ case Mips::INSERT_FD_PSEUDO:
+ return emitINSERT_FD(MI, BB);
+ case Mips::FILL_FW_PSEUDO:
+ return emitFILL_FW(MI, BB);
+ case Mips::FILL_FD_PSEUDO:
+ return emitFILL_FD(MI, BB);
+ case Mips::FEXP2_W_1_PSEUDO:
+ return emitFEXP2_W_1(MI, BB);
+ case Mips::FEXP2_D_1_PSEUDO:
+ return emitFEXP2_D_1(MI, BB);
}
}
@@ -496,21 +1089,81 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
InternalLinkage, CLI, Callee, Chain);
}
+SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ LoadSDNode &Nd = *cast<LoadSDNode>(Op);
+
+ if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
+ return MipsTargetLowering::lowerLOAD(Op, DAG);
+
+ // Replace a double precision load with two i32 loads and a buildpair64.
+ SDLoc DL(Op);
+ SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
+ EVT PtrVT = Ptr.getValueType();
+
+ // i32 load from lower address.
+ SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
+ MachinePointerInfo(), Nd.isVolatile(),
+ Nd.isNonTemporal(), Nd.isInvariant(),
+ Nd.getAlignment());
+
+ // i32 load from higher address.
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
+ MachinePointerInfo(), Nd.isVolatile(),
+ Nd.isNonTemporal(), Nd.isInvariant(),
+ std::min(Nd.getAlignment(), 4U));
+
+ if (!Subtarget->isLittle())
+ std::swap(Lo, Hi);
+
+ SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
+ SDValue Ops[2] = {BP, Hi.getValue(1)};
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode &Nd = *cast<StoreSDNode>(Op);
+
+ if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
+ return MipsTargetLowering::lowerSTORE(Op, DAG);
+
+ // Replace a double precision store with two extractelement64s and i32 stores.
+ SDLoc DL(Op);
+ SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
+ EVT PtrVT = Ptr.getValueType();
+ SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Val, DAG.getConstant(0, MVT::i32));
+ SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
+ Val, DAG.getConstant(1, MVT::i32));
+
+ if (!Subtarget->isLittle())
+ std::swap(Lo, Hi);
+
+ // i32 store to lower address.
+ Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
+ Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
+ Nd.getTBAAInfo());
+
+ // i32 store to higher address.
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
+ Nd.isVolatile(), Nd.isNonTemporal(),
+ std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
+}
+
SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
bool HasLo, bool HasHi,
SelectionDAG &DAG) const {
EVT Ty = Op.getOperand(0).getValueType();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
Op.getOperand(0), Op.getOperand(1));
SDValue Lo, Hi;
if (HasLo)
- Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
- DAG.getConstant(Mips::sub_lo, MVT::i32));
+ Lo = DAG.getNode(MipsISD::MFLO, DL, Ty, Mult);
if (HasHi)
- Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
- DAG.getConstant(Mips::sub_hi, MVT::i32));
+ Hi = DAG.getNode(MipsISD::MFHI, DL, Ty, Mult);
if (!HasLo || !HasHi)
return HasLo ? Lo : Hi;
@@ -520,19 +1173,17 @@ SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
}
-static SDValue initAccumulator(SDValue In, DebugLoc DL, SelectionDAG &DAG) {
+static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
DAG.getConstant(0, MVT::i32));
SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi);
+ return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
}
-static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) {
- SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
- DAG.getConstant(Mips::sub_lo, MVT::i32));
- SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
- DAG.getConstant(Mips::sub_hi, MVT::i32));
+static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
+ SDValue Lo = DAG.getNode(MipsISD::MFLO, DL, MVT::i32, Op);
+ SDValue Hi = DAG.getNode(MipsISD::MFHI, DL, MVT::i32, Op);
return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
}
@@ -549,7 +1200,7 @@ static SDValue extractLOHI(SDValue Op, DebugLoc DL, SelectionDAG &DAG) {
// out64 = merge-values (v0, v1)
//
static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
SmallVector<SDValue, 3> Ops;
unsigned OpNo = 0;
@@ -596,8 +1247,156 @@ static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
return DAG.getMergeValues(Vals, 2, DL);
}
+// Lower an MSA copy intrinsic into the specified SelectionDAG node
+static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
+ SDLoc DL(Op);
+ SDValue Vec = Op->getOperand(1);
+ SDValue Idx = Op->getOperand(2);
+ EVT ResTy = Op->getValueType(0);
+ EVT EltTy = Vec->getValueType(0).getVectorElementType();
+
+ SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
+ DAG.getValueType(EltTy));
+
+ return Result;
+}
+
+static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
+ EVT ResVecTy = Op->getValueType(0);
+ EVT ViaVecTy = ResVecTy;
+ SDLoc DL(Op);
+
+ // When ResVecTy == MVT::v2i64, LaneA is the upper 32 bits of the lane and
+  // LaneB is the lower 32 bits. Otherwise LaneA and LaneB are alternating
+ // lanes.
+ SDValue LaneA;
+ SDValue LaneB = Op->getOperand(2);
+
+ if (ResVecTy == MVT::v2i64) {
+ LaneA = DAG.getConstant(0, MVT::i32);
+ ViaVecTy = MVT::v4i32;
+ } else
+ LaneA = LaneB;
+
+ SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
+ LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
+
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
+ ViaVecTy.getVectorNumElements());
+
+ if (ViaVecTy != ResVecTy)
+ Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
+
+ return Result;
+}
+
+static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
+ return DAG.getConstant(Op->getConstantOperandVal(ImmOp), Op->getValueType(0));
+}
+
+static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
+ bool BigEndian, SelectionDAG &DAG) {
+ EVT ViaVecTy = VecTy;
+ SDValue SplatValueA = SplatValue;
+ SDValue SplatValueB = SplatValue;
+ SDLoc DL(SplatValue);
+
+ if (VecTy == MVT::v2i64) {
+ // v2i64 BUILD_VECTOR must be performed via v4i32 so split into i32's.
+ ViaVecTy = MVT::v4i32;
+
+ SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
+ SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
+ DAG.getConstant(32, MVT::i32));
+ SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
+ }
+
+ // We currently hold the parts in little endian order. Swap them if
+ // necessary.
+ if (BigEndian)
+ std::swap(SplatValueA, SplatValueB);
+
+ SDValue Ops[16] = { SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB,
+ SplatValueA, SplatValueB, SplatValueA, SplatValueB };
+
+ SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
+ ViaVecTy.getVectorNumElements());
+
+ if (VecTy != ViaVecTy)
+ Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
+
+ return Result;
+}
+
+static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
+ unsigned Opc, SDValue Imm,
+ bool BigEndian) {
+ EVT VecTy = Op->getValueType(0);
+ SDValue Exp2Imm;
+ SDLoc DL(Op);
+
+ // The DAG Combiner can't constant fold bitcasted vectors yet so we must do it
+ // here for now.
+ if (VecTy == MVT::v2i64) {
+ if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
+ APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
+
+ SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), MVT::i32);
+ SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), MVT::i32);
+
+ if (BigEndian)
+ std::swap(BitImmLoOp, BitImmHiOp);
+
+ Exp2Imm =
+ DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
+ DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, BitImmLoOp,
+ BitImmHiOp, BitImmLoOp, BitImmHiOp));
+ }
+ }
+
+ if (Exp2Imm.getNode() == NULL) {
+    // We couldn't constant fold, so do a vector shift instead.
+
+ // Extend i32 to i64 if necessary. Sign or zero extend doesn't matter since
+ // only values 0-63 are valid.
+ if (VecTy == MVT::v2i64)
+ Imm = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Imm);
+
+ Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG);
+
+ Exp2Imm =
+ DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, VecTy), Exp2Imm);
+ }
+
+ return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
+}
+
+static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
+ EVT ResTy = Op->getValueType(0);
+ SDLoc DL(Op);
+ SDValue One = DAG.getConstant(1, ResTy);
+ SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
+
+ return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1),
+ DAG.getNOT(DL, Bit, ResTy));
+}
+
+static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
+ SDLoc DL(Op);
+ EVT ResTy = Op->getValueType(0);
+ APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
+ << cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
+ SDValue BitMask = DAG.getConstant(~BitImm, ResTy);
+
+ return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask);
+}
+
SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
default:
return SDValue();
@@ -633,12 +1432,610 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
return lowerDSPIntr(Op, DAG, MipsISD::MSub);
case Intrinsic::mips_msubu:
return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
+ case Intrinsic::mips_addv_b:
+ case Intrinsic::mips_addv_h:
+ case Intrinsic::mips_addv_w:
+ case Intrinsic::mips_addv_d:
+ return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_addvi_b:
+ case Intrinsic::mips_addvi_h:
+ case Intrinsic::mips_addvi_w:
+ case Intrinsic::mips_addvi_d:
+ return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_and_v:
+ return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_andi_b:
+ return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_bclr_b:
+ case Intrinsic::mips_bclr_h:
+ case Intrinsic::mips_bclr_w:
+ case Intrinsic::mips_bclr_d:
+ return lowerMSABitClear(Op, DAG);
+ case Intrinsic::mips_bclri_b:
+ case Intrinsic::mips_bclri_h:
+ case Intrinsic::mips_bclri_w:
+ case Intrinsic::mips_bclri_d:
+ return lowerMSABitClearImm(Op, DAG);
+ case Intrinsic::mips_binsli_b:
+ case Intrinsic::mips_binsli_h:
+ case Intrinsic::mips_binsli_w:
+ case Intrinsic::mips_binsli_d: {
+ EVT VecTy = Op->getValueType(0);
+ EVT EltTy = VecTy.getVectorElementType();
+ APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
+ Op->getConstantOperandVal(3));
+ return DAG.getNode(ISD::VSELECT, DL, VecTy,
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
+ Op->getOperand(2));
+ }
+ case Intrinsic::mips_binsri_b:
+ case Intrinsic::mips_binsri_h:
+ case Intrinsic::mips_binsri_w:
+ case Intrinsic::mips_binsri_d: {
+ EVT VecTy = Op->getValueType(0);
+ EVT EltTy = VecTy.getVectorElementType();
+ APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
+ Op->getConstantOperandVal(3));
+ return DAG.getNode(ISD::VSELECT, DL, VecTy,
+ DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
+ Op->getOperand(2));
+ }
+ case Intrinsic::mips_bmnz_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
+ Op->getOperand(2), Op->getOperand(1));
+ case Intrinsic::mips_bmnzi_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 3, DAG), Op->getOperand(2),
+ Op->getOperand(1));
+ case Intrinsic::mips_bmz_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_bmzi_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 3, DAG), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_bneg_b:
+ case Intrinsic::mips_bneg_h:
+ case Intrinsic::mips_bneg_w:
+ case Intrinsic::mips_bneg_d: {
+ EVT VecTy = Op->getValueType(0);
+ SDValue One = DAG.getConstant(1, VecTy);
+
+ return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, DL, VecTy, One,
+ Op->getOperand(2)));
+ }
+ case Intrinsic::mips_bnegi_b:
+ case Intrinsic::mips_bnegi_h:
+ case Intrinsic::mips_bnegi_w:
+ case Intrinsic::mips_bnegi_d:
+ return lowerMSABinaryBitImmIntr(Op, DAG, ISD::XOR, Op->getOperand(2),
+ !Subtarget->isLittle());
+ case Intrinsic::mips_bnz_b:
+ case Intrinsic::mips_bnz_h:
+ case Intrinsic::mips_bnz_w:
+ case Intrinsic::mips_bnz_d:
+ return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bnz_v:
+ return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bsel_v:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2),
+ Op->getOperand(3));
+ case Intrinsic::mips_bseli_b:
+ return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2),
+ lowerMSASplatImm(Op, 3, DAG));
+ case Intrinsic::mips_bset_b:
+ case Intrinsic::mips_bset_h:
+ case Intrinsic::mips_bset_w:
+ case Intrinsic::mips_bset_d: {
+ EVT VecTy = Op->getValueType(0);
+ SDValue One = DAG.getConstant(1, VecTy);
+
+ return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, DL, VecTy, One,
+ Op->getOperand(2)));
+ }
+ case Intrinsic::mips_bseti_b:
+ case Intrinsic::mips_bseti_h:
+ case Intrinsic::mips_bseti_w:
+ case Intrinsic::mips_bseti_d:
+ return lowerMSABinaryBitImmIntr(Op, DAG, ISD::OR, Op->getOperand(2),
+ !Subtarget->isLittle());
+ case Intrinsic::mips_bz_b:
+ case Intrinsic::mips_bz_h:
+ case Intrinsic::mips_bz_w:
+ case Intrinsic::mips_bz_d:
+ return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_bz_v:
+ return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ceq_b:
+ case Intrinsic::mips_ceq_h:
+ case Intrinsic::mips_ceq_w:
+ case Intrinsic::mips_ceq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETEQ);
+ case Intrinsic::mips_ceqi_b:
+ case Intrinsic::mips_ceqi_h:
+ case Intrinsic::mips_ceqi_w:
+ case Intrinsic::mips_ceqi_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
+ case Intrinsic::mips_cle_s_b:
+ case Intrinsic::mips_cle_s_h:
+ case Intrinsic::mips_cle_s_w:
+ case Intrinsic::mips_cle_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETLE);
+ case Intrinsic::mips_clei_s_b:
+ case Intrinsic::mips_clei_s_h:
+ case Intrinsic::mips_clei_s_w:
+ case Intrinsic::mips_clei_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
+ case Intrinsic::mips_cle_u_b:
+ case Intrinsic::mips_cle_u_h:
+ case Intrinsic::mips_cle_u_w:
+ case Intrinsic::mips_cle_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULE);
+ case Intrinsic::mips_clei_u_b:
+ case Intrinsic::mips_clei_u_h:
+ case Intrinsic::mips_clei_u_w:
+ case Intrinsic::mips_clei_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
+ case Intrinsic::mips_clt_s_b:
+ case Intrinsic::mips_clt_s_h:
+ case Intrinsic::mips_clt_s_w:
+ case Intrinsic::mips_clt_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETLT);
+ case Intrinsic::mips_clti_s_b:
+ case Intrinsic::mips_clti_s_h:
+ case Intrinsic::mips_clti_s_w:
+ case Intrinsic::mips_clti_s_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
+ case Intrinsic::mips_clt_u_b:
+ case Intrinsic::mips_clt_u_h:
+ case Intrinsic::mips_clt_u_w:
+ case Intrinsic::mips_clt_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULT);
+ case Intrinsic::mips_clti_u_b:
+ case Intrinsic::mips_clti_u_h:
+ case Intrinsic::mips_clti_u_w:
+ case Intrinsic::mips_clti_u_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
+ case Intrinsic::mips_copy_s_b:
+ case Intrinsic::mips_copy_s_h:
+ case Intrinsic::mips_copy_s_w:
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
+ case Intrinsic::mips_copy_s_d:
+ // Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal.
+ // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_copy_u_b:
+ case Intrinsic::mips_copy_u_h:
+ case Intrinsic::mips_copy_u_w:
+ return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
+ case Intrinsic::mips_copy_u_d:
+ // Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal.
+ // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
+ // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
+ //
+ // Note: When i64 is illegal, this results in copy_s.w instructions instead
+ // of copy_u.w instructions. This makes no difference to the behaviour
+ // since i64 is only illegal when the register file is 32-bit.
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_div_s_b:
+ case Intrinsic::mips_div_s_h:
+ case Intrinsic::mips_div_s_w:
+ case Intrinsic::mips_div_s_d:
+ return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_div_u_b:
+ case Intrinsic::mips_div_u_h:
+ case Intrinsic::mips_div_u_w:
+ case Intrinsic::mips_div_u_d:
+ return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_fadd_w:
+ case Intrinsic::mips_fadd_d:
+ return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
+ case Intrinsic::mips_fceq_w:
+ case Intrinsic::mips_fceq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOEQ);
+ case Intrinsic::mips_fcle_w:
+ case Intrinsic::mips_fcle_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOLE);
+ case Intrinsic::mips_fclt_w:
+ case Intrinsic::mips_fclt_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETOLT);
+ case Intrinsic::mips_fcne_w:
+ case Intrinsic::mips_fcne_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETONE);
+ case Intrinsic::mips_fcor_w:
+ case Intrinsic::mips_fcor_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETO);
+ case Intrinsic::mips_fcueq_w:
+ case Intrinsic::mips_fcueq_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUEQ);
+ case Intrinsic::mips_fcule_w:
+ case Intrinsic::mips_fcule_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULE);
+ case Intrinsic::mips_fcult_w:
+ case Intrinsic::mips_fcult_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETULT);
+ case Intrinsic::mips_fcun_w:
+ case Intrinsic::mips_fcun_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUO);
+ case Intrinsic::mips_fcune_w:
+ case Intrinsic::mips_fcune_d:
+ return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2), ISD::SETUNE);
+ case Intrinsic::mips_fdiv_w:
+ case Intrinsic::mips_fdiv_d:
+ return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ffint_u_w:
+ case Intrinsic::mips_ffint_u_d:
+ return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ffint_s_w:
+ case Intrinsic::mips_ffint_s_d:
+ return DAG.getNode(ISD::SINT_TO_FP, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_fill_b:
+ case Intrinsic::mips_fill_h:
+ case Intrinsic::mips_fill_w:
+ case Intrinsic::mips_fill_d: {
+ SmallVector<SDValue, 16> Ops;
+ EVT ResTy = Op->getValueType(0);
+
+ for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
+ Ops.push_back(Op->getOperand(1));
+
+ // If ResTy is v2i64 then the type legalizer will break this node down into
+ // an equivalent v4i32.
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0], Ops.size());
+ }
+ case Intrinsic::mips_fexp2_w:
+ case Intrinsic::mips_fexp2_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(
+ ISD::FMUL, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::FEXP2, SDLoc(Op), ResTy, Op->getOperand(2)));
+ }
+ case Intrinsic::mips_flog2_w:
+ case Intrinsic::mips_flog2_d:
+ return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fmadd_w:
+ case Intrinsic::mips_fmadd_d:
+ return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
+ case Intrinsic::mips_fmul_w:
+ case Intrinsic::mips_fmul_d:
+ return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_fmsub_w:
+ case Intrinsic::mips_fmsub_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::FSUB, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::FMUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_frint_w:
+ case Intrinsic::mips_frint_d:
+ return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fsqrt_w:
+ case Intrinsic::mips_fsqrt_d:
+ return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_fsub_w:
+ case Intrinsic::mips_fsub_d:
+ return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ftrunc_u_w:
+ case Intrinsic::mips_ftrunc_u_d:
+ return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ftrunc_s_w:
+ case Intrinsic::mips_ftrunc_s_d:
+ return DAG.getNode(ISD::FP_TO_SINT, DL, Op->getValueType(0),
+ Op->getOperand(1));
+ case Intrinsic::mips_ilvev_b:
+ case Intrinsic::mips_ilvev_h:
+ case Intrinsic::mips_ilvev_w:
+ case Intrinsic::mips_ilvev_d:
+ return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvl_b:
+ case Intrinsic::mips_ilvl_h:
+ case Intrinsic::mips_ilvl_w:
+ case Intrinsic::mips_ilvl_d:
+ return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvod_b:
+ case Intrinsic::mips_ilvod_h:
+ case Intrinsic::mips_ilvod_w:
+ case Intrinsic::mips_ilvod_d:
+ return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_ilvr_b:
+ case Intrinsic::mips_ilvr_h:
+ case Intrinsic::mips_ilvr_w:
+ case Intrinsic::mips_ilvr_d:
+ return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_insert_b:
+ case Intrinsic::mips_insert_h:
+ case Intrinsic::mips_insert_w:
+ case Intrinsic::mips_insert_d:
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(3), Op->getOperand(2));
+ case Intrinsic::mips_ldi_b:
+ case Intrinsic::mips_ldi_h:
+ case Intrinsic::mips_ldi_w:
+ case Intrinsic::mips_ldi_d:
+ return lowerMSASplatImm(Op, 1, DAG);
+ case Intrinsic::mips_lsa: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::SHL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_maddv_b:
+ case Intrinsic::mips_maddv_h:
+ case Intrinsic::mips_maddv_w:
+ case Intrinsic::mips_maddv_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_max_s_b:
+ case Intrinsic::mips_max_s_h:
+ case Intrinsic::mips_max_s_w:
+ case Intrinsic::mips_max_s_d:
+ return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_max_u_b:
+ case Intrinsic::mips_max_u_h:
+ case Intrinsic::mips_max_u_w:
+ case Intrinsic::mips_max_u_d:
+ return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_maxi_s_b:
+ case Intrinsic::mips_maxi_s_h:
+ case Intrinsic::mips_maxi_s_w:
+ case Intrinsic::mips_maxi_s_d:
+ return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_maxi_u_b:
+ case Intrinsic::mips_maxi_u_h:
+ case Intrinsic::mips_maxi_u_w:
+ case Intrinsic::mips_maxi_u_d:
+ return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_min_s_b:
+ case Intrinsic::mips_min_s_h:
+ case Intrinsic::mips_min_s_w:
+ case Intrinsic::mips_min_s_d:
+ return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_min_u_b:
+ case Intrinsic::mips_min_u_h:
+ case Intrinsic::mips_min_u_w:
+ case Intrinsic::mips_min_u_d:
+ return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_mini_s_b:
+ case Intrinsic::mips_mini_s_h:
+ case Intrinsic::mips_mini_s_w:
+ case Intrinsic::mips_mini_s_d:
+ return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_mini_u_b:
+ case Intrinsic::mips_mini_u_h:
+ case Intrinsic::mips_mini_u_w:
+ case Intrinsic::mips_mini_u_d:
+ return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_mod_s_b:
+ case Intrinsic::mips_mod_s_h:
+ case Intrinsic::mips_mod_s_w:
+ case Intrinsic::mips_mod_s_d:
+ return DAG.getNode(ISD::SREM, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_mod_u_b:
+ case Intrinsic::mips_mod_u_h:
+ case Intrinsic::mips_mod_u_w:
+ case Intrinsic::mips_mod_u_d:
+ return DAG.getNode(ISD::UREM, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_mulv_b:
+ case Intrinsic::mips_mulv_h:
+ case Intrinsic::mips_mulv_w:
+ case Intrinsic::mips_mulv_d:
+ return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_msubv_b:
+ case Intrinsic::mips_msubv_h:
+ case Intrinsic::mips_msubv_w:
+ case Intrinsic::mips_msubv_d: {
+ EVT ResTy = Op->getValueType(0);
+ return DAG.getNode(ISD::SUB, SDLoc(Op), ResTy, Op->getOperand(1),
+ DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
+ Op->getOperand(2), Op->getOperand(3)));
+ }
+ case Intrinsic::mips_nlzc_b:
+ case Intrinsic::mips_nlzc_h:
+ case Intrinsic::mips_nlzc_w:
+ case Intrinsic::mips_nlzc_d:
+ return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_nor_v: {
+ SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ return DAG.getNOT(DL, Res, Res->getValueType(0));
+ }
+ case Intrinsic::mips_nori_b: {
+ SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1),
+ lowerMSASplatImm(Op, 2, DAG));
+ return DAG.getNOT(DL, Res, Res->getValueType(0));
+ }
+ case Intrinsic::mips_or_v:
+ return DAG.getNode(ISD::OR, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_ori_b:
+ return DAG.getNode(ISD::OR, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_pckev_b:
+ case Intrinsic::mips_pckev_h:
+ case Intrinsic::mips_pckev_w:
+ case Intrinsic::mips_pckev_d:
+ return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_pckod_b:
+ case Intrinsic::mips_pckod_h:
+ case Intrinsic::mips_pckod_w:
+ case Intrinsic::mips_pckod_d:
+ return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2));
+ case Intrinsic::mips_pcnt_b:
+ case Intrinsic::mips_pcnt_h:
+ case Intrinsic::mips_pcnt_w:
+ case Intrinsic::mips_pcnt_d:
+ return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1));
+ case Intrinsic::mips_shf_b:
+ case Intrinsic::mips_shf_h:
+ case Intrinsic::mips_shf_w:
+ return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0),
+ Op->getOperand(2), Op->getOperand(1));
+ case Intrinsic::mips_sll_b:
+ case Intrinsic::mips_sll_h:
+ case Intrinsic::mips_sll_w:
+ case Intrinsic::mips_sll_d:
+ return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_slli_b:
+ case Intrinsic::mips_slli_h:
+ case Intrinsic::mips_slli_w:
+ case Intrinsic::mips_slli_d:
+ return DAG.getNode(ISD::SHL, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_splat_b:
+ case Intrinsic::mips_splat_h:
+ case Intrinsic::mips_splat_w:
+ case Intrinsic::mips_splat_d:
+ // We can't lower via VECTOR_SHUFFLE because it requires constant shuffle
+ // masks, nor can we lower via BUILD_VECTOR & EXTRACT_VECTOR_ELT because
+    // EXTRACT_VECTOR_ELT can't extract i64s on MIPS32.
+ // Instead we lower to MipsISD::VSHF and match from there.
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ lowerMSASplatZExt(Op, 2, DAG), Op->getOperand(1),
+ Op->getOperand(1));
+ case Intrinsic::mips_splati_b:
+ case Intrinsic::mips_splati_h:
+ case Intrinsic::mips_splati_w:
+ case Intrinsic::mips_splati_d:
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1),
+ Op->getOperand(1));
+ case Intrinsic::mips_sra_b:
+ case Intrinsic::mips_sra_h:
+ case Intrinsic::mips_sra_w:
+ case Intrinsic::mips_sra_d:
+ return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_srai_b:
+ case Intrinsic::mips_srai_h:
+ case Intrinsic::mips_srai_w:
+ case Intrinsic::mips_srai_d:
+ return DAG.getNode(ISD::SRA, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_srl_b:
+ case Intrinsic::mips_srl_h:
+ case Intrinsic::mips_srl_w:
+ case Intrinsic::mips_srl_d:
+ return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_srli_b:
+ case Intrinsic::mips_srli_h:
+ case Intrinsic::mips_srli_w:
+ case Intrinsic::mips_srli_d:
+ return DAG.getNode(ISD::SRL, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_subv_b:
+ case Intrinsic::mips_subv_h:
+ case Intrinsic::mips_subv_w:
+ case Intrinsic::mips_subv_d:
+ return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_subvi_b:
+ case Intrinsic::mips_subvi_h:
+ case Intrinsic::mips_subvi_w:
+ case Intrinsic::mips_subvi_d:
+ return DAG.getNode(ISD::SUB, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+ case Intrinsic::mips_vshf_b:
+ case Intrinsic::mips_vshf_h:
+ case Intrinsic::mips_vshf_w:
+ case Intrinsic::mips_vshf_d:
+ return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
+ Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
+ case Intrinsic::mips_xor_v:
+ return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), Op->getOperand(1),
+ Op->getOperand(2));
+ case Intrinsic::mips_xori_b:
+ return DAG.getNode(ISD::XOR, DL, Op->getValueType(0),
+ Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
}
}
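As a rough per-lane sketch of a few of the generic node patterns the switch above builds for the MSA intrinsics (illustrative C++ only; the helper names are invented and shift amounts are assumed to be in range):

    #include <cstdint>

    // One 32-bit lane of the node patterns built above.
    uint32_t bneg_w(uint32_t x, uint32_t n) { return x ^ (1u << n); }              // XOR of SHL
    uint32_t bset_w(uint32_t x, uint32_t n) { return x | (1u << n); }              // OR of SHL
    uint32_t nor_v(uint32_t a, uint32_t b)  { return ~(a | b); }                   // getNOT of OR
    uint32_t lsa_node(uint32_t a, uint32_t b, unsigned sa) { return a + (b << sa); } // ADD of SHL
    int32_t  maddv_w(int32_t acc, int32_t a, int32_t b) { return acc + a * b; }    // ADD of MUL
    int32_t  msubv_w(int32_t acc, int32_t a, int32_t b) { return acc - a * b; }    // SUB of MUL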
+static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
+ SDLoc DL(Op);
+ SDValue ChainIn = Op->getOperand(0);
+ SDValue Address = Op->getOperand(2);
+ SDValue Offset = Op->getOperand(3);
+ EVT ResTy = Op->getValueType(0);
+ EVT PtrTy = Address->getValueType(0);
+
+ Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
+
+ return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
+ false, false, 16);
+}
+
SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
- switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) {
+ unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ switch (Intr) {
default:
return SDValue();
case Intrinsic::mips_extp:
@@ -681,9 +2078,524 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
case Intrinsic::mips_dpsqx_sa_w_ph:
return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
+ case Intrinsic::mips_ld_b:
+ case Intrinsic::mips_ld_h:
+ case Intrinsic::mips_ld_w:
+ case Intrinsic::mips_ld_d:
+ return lowerMSALoadIntr(Op, DAG, Intr);
+ }
+}
+
+static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
+ SDLoc DL(Op);
+ SDValue ChainIn = Op->getOperand(0);
+ SDValue Value = Op->getOperand(2);
+ SDValue Address = Op->getOperand(3);
+ SDValue Offset = Op->getOperand(4);
+ EVT PtrTy = Address->getValueType(0);
+
+ Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
+
+ return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
+ false, 16);
+}
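For reference, lowerMSALoadIntr and lowerMSAStoreIntr above turn the ld.[bhwd]/st.[bhwd] intrinsics into ordinary vector loads/stores at address plus byte offset with 16-byte alignment. A memcpy-level sketch of the addressing they assume (function names are illustrative, not from the patch):

    #include <cstdint>
    #include <cstring>

    // The lowering builds an ISD::ADD for (Address + Offset) and accesses a
    // whole 128-bit vector there, declaring 16-byte alignment.
    void msa_st_sketch(const void *Value, char *Address, std::intptr_t Offset) {
      std::memcpy(Address + Offset, Value, 16);
    }
    void msa_ld_sketch(void *Result, const char *Address, std::intptr_t Offset) {
      std::memcpy(Result, Address + Offset, 16);
    }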
+
+SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
+ switch (Intr) {
+ default:
+ return SDValue();
+ case Intrinsic::mips_st_b:
+ case Intrinsic::mips_st_h:
+ case Intrinsic::mips_st_w:
+ case Intrinsic::mips_st_d:
+ return lowerMSAStoreIntr(Op, DAG, Intr);
}
}
+/// \brief Check if the given BuildVectorSDNode is a splat.
+/// This method currently relies on DAG nodes being reused when equivalent,
+/// so it's possible for this to return false even when isConstantSplat returns
+/// true.
+static bool isSplatVector(const BuildVectorSDNode *N) {
+ unsigned int nOps = N->getNumOperands();
+ assert(nOps > 1 && "isSplatVector has 0 or 1 sized build vector");
+
+ SDValue Operand0 = N->getOperand(0);
+
+ for (unsigned int i = 1; i < nOps; ++i) {
+ if (N->getOperand(i) != Operand0)
+ return false;
+ }
+
+ return true;
+}
+
+// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
+//
+// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
+// choose to sign-extend but we could have equally chosen zero-extend. The
+// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
+// result into this node later (possibly changing it to a zero-extend in the
+// process).
+SDValue MipsSETargetLowering::
+lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT ResTy = Op->getValueType(0);
+ SDValue Op0 = Op->getOperand(0);
+ EVT VecTy = Op0->getValueType(0);
+
+ if (!VecTy.is128BitVector())
+ return SDValue();
+
+ if (ResTy.isInteger()) {
+ SDValue Op1 = Op->getOperand(1);
+ EVT EltTy = VecTy.getVectorElementType();
+ return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
+ DAG.getValueType(EltTy));
+ }
+
+ return Op;
+}
+
+static bool isConstantOrUndef(const SDValue Op) {
+ if (Op->getOpcode() == ISD::UNDEF)
+ return true;
+ if (dyn_cast<ConstantSDNode>(Op))
+ return true;
+ if (dyn_cast<ConstantFPSDNode>(Op))
+ return true;
+ return false;
+}
+
+static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
+ for (unsigned i = 0; i < Op->getNumOperands(); ++i)
+ if (isConstantOrUndef(Op->getOperand(i)))
+ return true;
+ return false;
+}
+
+// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
+// backend.
+//
+// Lowers according to the following rules:
+// - Constant splats are legal as-is as long as the SplatBitSize is a power of
+// 2 less than or equal to 64 and the value fits into a signed 10-bit
+// immediate
+// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
+// is a power of 2 less than or equal to 64 and the value does not fit into a
+// signed 10-bit immediate
+// - Non-constant splats are legal as-is.
+// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
+// - All others are illegal and must be expanded.
+SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
+ EVT ResTy = Op->getValueType(0);
+ SDLoc DL(Op);
+ APInt SplatValue, SplatUndef;
+ unsigned SplatBitSize;
+ bool HasAnyUndefs;
+
+ if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
+ return SDValue();
+
+ if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+ HasAnyUndefs, 8,
+ !Subtarget->isLittle()) && SplatBitSize <= 64) {
+ // We can only cope with 8, 16, 32, or 64-bit elements
+ if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
+ SplatBitSize != 64)
+ return SDValue();
+
+ // If the value fits into a simm10 then we can use ldi.[bhwd]
+ // However, if it isn't an integer type we will have to bitcast from an
+ // integer type first. Also, if there are any undefs, we must lower them
+ // to defined values first.
+ if (ResTy.isInteger() && !HasAnyUndefs && SplatValue.isSignedIntN(10))
+ return Op;
+
+ EVT ViaVecTy;
+
+ switch (SplatBitSize) {
+ default:
+ return SDValue();
+ case 8:
+ ViaVecTy = MVT::v16i8;
+ break;
+ case 16:
+ ViaVecTy = MVT::v8i16;
+ break;
+ case 32:
+ ViaVecTy = MVT::v4i32;
+ break;
+ case 64:
+ // There's no fill.d to fall back on for 64-bit values
+ return SDValue();
+ }
+
+ // SelectionDAG::getConstant will promote SplatValue appropriately.
+ SDValue Result = DAG.getConstant(SplatValue, ViaVecTy);
+
+ // Bitcast to the type we originally wanted
+ if (ViaVecTy != ResTy)
+ Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
+
+ return Result;
+ } else if (isSplatVector(Node))
+ return Op;
+ else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
+ // Use INSERT_VECTOR_ELT operations rather than expand to stores.
+ // The resulting code is the same length as the expansion, but it doesn't
+ // use memory operations
+ EVT ResTy = Node->getValueType(0);
+
+ assert(ResTy.isVector());
+
+ unsigned NumElts = ResTy.getVectorNumElements();
+ SDValue Vector = DAG.getUNDEF(ResTy);
+ for (unsigned i = 0; i < NumElts; ++i) {
+ Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
+ Node->getOperand(i),
+ DAG.getConstant(i, MVT::i32));
+ }
+ return Vector;
+ }
+
+ return SDValue();
+}
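Two details of lowerBUILD_VECTOR above are easy to misread: isConstantOrUndefBUILD_VECTOR returns true as soon as any operand is a constant or undef, so the INSERT_VECTOR_ELT path only fires for fully non-constant vectors, and the ldi.[bhwd] fast path requires the splat value to fit a signed 10-bit immediate. A standalone check of that simm10 boundary using the same APInt predicate (illustration only, not part of the patch):

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    void simm10Boundary() {
      // Signed 10-bit immediates cover [-512, 511], matching the
      // SplatValue.isSignedIntN(10) test above.
      assert(llvm::APInt(32, 511).isSignedIntN(10));
      assert(llvm::APInt(32, -512, /*isSigned=*/true).isSignedIntN(10));
      assert(!llvm::APInt(32, 512).isSignedIntN(10));
    }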
+
+// Lower VECTOR_SHUFFLE into SHF (if possible).
+//
+// SHF splits the vector into blocks of four elements, then shuffles these
+// elements according to a <4 x i2> constant (encoded as an integer immediate).
+//
+// It is therefore possible to lower into SHF when the mask takes the form:
+// <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
+// When undefs appear they are treated as if they were whatever value is
+// necessary in order to fit the above form.
+//
+// For example:
+// %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
+// <8 x i32> <i32 3, i32 2, i32 1, i32 0,
+// i32 7, i32 6, i32 5, i32 4>
+// is lowered to:
+// (SHF_H $w0, $w1, 27)
+// where the 27 comes from:
+// 3 + (2 << 2) + (1 << 4) + (0 << 6)
+static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ int SHFIndices[4] = { -1, -1, -1, -1 };
+
+ if (Indices.size() < 4)
+ return SDValue();
+
+ for (unsigned i = 0; i < 4; ++i) {
+ for (unsigned j = i; j < Indices.size(); j += 4) {
+ int Idx = Indices[j];
+
+ // Convert from vector index to 4-element subvector index
+ // If an index refers to an element outside of the subvector then give up
+ if (Idx != -1) {
+ Idx -= 4 * (j / 4);
+ if (Idx < 0 || Idx >= 4)
+ return SDValue();
+ }
+
+ // If the mask has an undef, replace it with the current index.
+ // Note that it might still be undef if the current index is also undef
+ if (SHFIndices[i] == -1)
+ SHFIndices[i] = Idx;
+
+ // Check that non-undef values are the same as in the mask. If they
+ // aren't then give up
+ if (!(Idx == -1 || Idx == SHFIndices[i]))
+ return SDValue();
+ }
+ }
+
+ // Calculate the immediate. Replace any remaining undefs with zero
+ APInt Imm(32, 0);
+ for (int i = 3; i >= 0; --i) {
+ int Idx = SHFIndices[i];
+
+ if (Idx == -1)
+ Idx = 0;
+
+ Imm <<= 2;
+ Imm |= Idx & 0x3;
+ }
+
+ return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
+ DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
+}
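The SHF immediate packing above can be checked in isolation; this sketch reproduces the encoding loop for the <3, 2, 1, 0, 7, 6, 5, 4> example in the comment and yields 27 (illustrative only):

    #include <cassert>
    #include <cstdint>

    // Pack four 2-bit subvector indices exactly as lowerVECTOR_SHUFFLE_SHF does.
    uint32_t encodeSHF(const int (&Idx)[4]) {
      uint32_t Imm = 0;
      for (int i = 3; i >= 0; --i) {
        Imm <<= 2;
        Imm |= Idx[i] & 0x3;
      }
      return Imm;
    }

    void shfExample() {
      const int Reversed[4] = {3, 2, 1, 0}; // from mask <3,2,1,0, 7,6,5,4>
      assert(encodeSHF(Reversed) == 27);    // 3 + (2 << 2) + (1 << 4) + (0 << 6)
    }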
+
+// Lower VECTOR_SHUFFLE into ILVEV (if possible).
+//
+// ILVEV interleaves the even elements from each vector.
+//
+// It is possible to lower into ILVEV when the mask takes the form:
+// <0, n, 2, n+2, 4, n+4, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 0;
+ int WtIdx = ResTy.getVectorNumElements();
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx += 2;
+ WtIdx += 2;
+ }
+
+ return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVOD (if possible).
+//
+// ILVOD interleaves the odd elements from each vector.
+//
+// It is possible to lower into ILVOD when the mask takes the form:
+// <1, n+1, 3, n+3, 5, n+5, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 1;
+ int WtIdx = ResTy.getVectorNumElements() + 1;
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx += 2;
+ WtIdx += 2;
+ }
+
+ return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVL (if possible).
+//
+// ILVL interleaves consecutive elements from the left half of each vector.
+//
+// It is possible to lower into ILVL when the mask takes the form:
+// <0, n, 1, n+1, 2, n+2, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int WsIdx = 0;
+ int WtIdx = ResTy.getVectorNumElements();
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx ++;
+ WtIdx ++;
+ }
+
+ return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into ILVR (if possible).
+//
+// ILVR interleaves consecutive elements from the right half of each vector.
+//
+// It is possible to lower into ILVR when the mask takes the form:
+// <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
+// where n is the number of elements in the vector and x is half n.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ unsigned NumElts = ResTy.getVectorNumElements();
+ int WsIdx = NumElts / 2;
+ int WtIdx = NumElts + NumElts / 2;
+
+ for (unsigned i = 0; i < Indices.size(); i += 2) {
+ if (Indices[i] != -1 && Indices[i] != WsIdx)
+ return SDValue();
+ if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
+ return SDValue();
+ WsIdx ++;
+ WtIdx ++;
+ }
+
+ return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into PCKEV (if possible).
+//
+// PCKEV copies the even elements of each vector into the result vector.
+//
+// It is possible to lower into PCKEV when the mask takes the form:
+// <0, 2, 4, ..., n, n+2, n+4, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int Idx = 0;
+
+ for (unsigned i = 0; i < Indices.size(); ++i) {
+ if (Indices[i] != -1 && Indices[i] != Idx)
+ return SDValue();
+ Idx += 2;
+ }
+
+ return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
+
+// Lower VECTOR_SHUFFLE into PCKOD (if possible).
+//
+// PCKOD copies the odd elements of each vector into the result vector.
+//
+// It is possible to lower into PCKOD when the mask takes the form:
+// <1, 3, 5, ..., n+1, n+3, n+5, ...>
+// where n is the number of elements in the vector.
+//
+// When undefs appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ assert ((Indices.size() % 2) == 0);
+ int Idx = 1;
+
+ for (unsigned i = 0; i < Indices.size(); ++i) {
+ if (Indices[i] != -1 && Indices[i] != Idx)
+ return SDValue();
+ Idx += 2;
+ }
+
+ return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
+ Op->getOperand(1));
+}
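For a concrete picture of what the six helpers above accept, these are the exact index patterns they match for a v8i16 shuffle (n = 8), derived from the matching loops; -1 (undef) is additionally allowed in any position (illustration only):

    // Shuffle masks matched for v8i16 (n = 8).
    const int ILVEV_Mask[8] = {0, 8, 2, 10, 4, 12, 6, 14};  // <0, n, 2, n+2, ...>
    const int ILVOD_Mask[8] = {1, 9, 3, 11, 5, 13, 7, 15};  // <1, n+1, 3, n+3, ...>
    const int ILVL_Mask[8]  = {0, 8, 1, 9, 2, 10, 3, 11};   // <0, n, 1, n+1, ...>
    const int ILVR_Mask[8]  = {4, 12, 5, 13, 6, 14, 7, 15}; // <x, n+x, x+1, ...>, x = n/2
    const int PCKEV_Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};  // even elements of both inputs
    const int PCKOD_Mask[8] = {1, 3, 5, 7, 9, 11, 13, 15};  // odd elements of both inputs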
+
+// Lower VECTOR_SHUFFLE into VSHF.
+//
+// This mostly consists of converting the shuffle indices in Indices into a
+// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
+// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
+// if the type is v8i16 and all the indices are less than 8 then the second
+// operand is unused and can be replaced with anything. We choose to replace it
+// with the used operand since this reduces the number of instructions overall.
+static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
+ SmallVector<int, 16> Indices,
+ SelectionDAG &DAG) {
+ SmallVector<SDValue, 16> Ops;
+ SDValue Op0;
+ SDValue Op1;
+ EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
+ EVT MaskEltTy = MaskVecTy.getVectorElementType();
+ bool Using1stVec = false;
+ bool Using2ndVec = false;
+ SDLoc DL(Op);
+ int ResTyNumElts = ResTy.getVectorNumElements();
+
+ for (int i = 0; i < ResTyNumElts; ++i) {
+ // Idx == -1 means UNDEF
+ int Idx = Indices[i];
+
+ if (0 <= Idx && Idx < ResTyNumElts)
+ Using1stVec = true;
+ if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
+ Using2ndVec = true;
+ }
+
+ for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
+ ++I)
+ Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
+
+ SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
+ Ops.size());
+
+ if (Using1stVec && Using2ndVec) {
+ Op0 = Op->getOperand(0);
+ Op1 = Op->getOperand(1);
+ } else if (Using1stVec)
+ Op0 = Op1 = Op->getOperand(0);
+ else if (Using2ndVec)
+ Op0 = Op1 = Op->getOperand(1);
+ else
+ llvm_unreachable("shuffle vector mask references neither vector operand?");
+
+ return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
+}
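As a quick example of the operand-reuse rule above: for a v8i16 shuffle whose mask is <1, 0, 3, 2, 5, 4, 7, 6>, every index is below 8, so only the first input is referenced and both vector operands of the VSHF become Op->getOperand(0). A minimal mirror of the classification (illustrative helpers, not from the patch):

    // Indices in [0, n) reference the first shuffle input, [n, 2n) the second,
    // and -1 is undef; the VSHF lowering reuses the single referenced input
    // for both operands when only one is used.
    bool usesFirstInput(const int *Indices, int NumElts) {
      for (int i = 0; i < NumElts; ++i)
        if (0 <= Indices[i] && Indices[i] < NumElts)
          return true;
      return false;
    }
    bool usesSecondInput(const int *Indices, int NumElts) {
      for (int i = 0; i < NumElts; ++i)
        if (NumElts <= Indices[i] && Indices[i] < NumElts * 2)
          return true;
      return false;
    }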
+
+// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
+// indices in the shuffle.
+SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
+ SelectionDAG &DAG) const {
+ ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
+ EVT ResTy = Op->getValueType(0);
+
+ if (!ResTy.is128BitVector())
+ return SDValue();
+
+ int ResTyNumElts = ResTy.getVectorNumElements();
+ SmallVector<int, 16> Indices;
+
+ for (int i = 0; i < ResTyNumElts; ++i)
+ Indices.push_back(Node->getMaskElt(i));
+
+ SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
+}
+
MachineBasicBlock * MipsSETargetLowering::
emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
// $bb:
@@ -701,7 +2613,7 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- const TargetRegisterClass *RC = &Mips::CPURegsRegClass;
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
DebugLoc DL = MI->getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
@@ -746,3 +2658,318 @@ emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
MI->eraseFromParent(); // The pseudo instruction is gone now.
return Sink;
}
+
+MachineBasicBlock * MipsSETargetLowering::
+emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
+ unsigned BranchOp) const{
+ // $bb:
+ // vany_nonzero $rd, $ws
+ // =>
+ // $bb:
+ // bnz.b $ws, $tbb
+ // b $fbb
+ // $fbb:
+ // li $rd1, 0
+ // b $sink
+ // $tbb:
+ // li $rd2, 1
+ // $sink:
+ // $rd = phi($rd1, $fbb, $rd2, $tbb)
+
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ DebugLoc DL = MI->getDebugLoc();
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, FBB);
+ F->insert(It, TBB);
+ F->insert(It, Sink);
+
+ // Transfer the remainder of BB and its successor edges to Sink.
+ Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ Sink->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Add successors.
+ BB->addSuccessor(FBB);
+ BB->addSuccessor(TBB);
+ FBB->addSuccessor(Sink);
+ TBB->addSuccessor(Sink);
+
+ // Insert the real bnz.b instruction to $BB.
+ BuildMI(BB, DL, TII->get(BranchOp))
+ .addReg(MI->getOperand(1).getReg())
+ .addMBB(TBB);
+
+ // Fill $FBB.
+ unsigned RD1 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
+ .addReg(Mips::ZERO).addImm(0);
+ BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
+
+ // Fill $TBB.
+ unsigned RD2 = RegInfo.createVirtualRegister(RC);
+ BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
+ .addReg(Mips::ZERO).addImm(1);
+
+ // Insert phi function to $Sink.
+ BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
+ MI->getOperand(0).getReg())
+ .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return Sink;
+}
+
+// Emit the COPY_FW pseudo instruction.
+//
+// copy_fw_pseudo $fd, $ws, n
+// =>
+// copy_u_w $rt, $ws, $n
+// mtc1 $rt, $fd
+//
+// When n is zero, the equivalent operation can be performed with (potentially)
+// zero instructions due to register overlaps. This optimization is never valid
+// for lane 1 because it would require FR=0 mode, which isn't supported by MSA.
+MachineBasicBlock * MipsSETargetLowering::
+emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Fd = MI->getOperand(0).getReg();
+ unsigned Ws = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+
+ if (Lane == 0)
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
+ else {
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the COPY_FD pseudo instruction.
+//
+// copy_fd_pseudo $fd, $ws, n
+// =>
+// splati.d $wt, $ws, $n
+// copy $fd, $wt:sub_64
+//
+// When n is zero, the equivalent operation can be performed with (potentially)
+// zero instructions due to register overlaps. This optimization is always
+// valid because FR=1 mode is the only mode supported by MSA.
+MachineBasicBlock * MipsSETargetLowering::
+emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ unsigned Fd = MI->getOperand(0).getReg();
+ unsigned Ws = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm() * 2;
+ DebugLoc DL = MI->getDebugLoc();
+
+ if (Lane == 0)
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
+ else {
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the INSERT_FW pseudo instruction.
+//
+// insert_fw_pseudo $wd, $wd_in, $n, $fs
+// =>
+// subreg_to_reg $wt:sub_lo, $fs
+// insve_w $wd[$n], $wd_in, $wt[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Wd_in = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+ unsigned Fs = MI->getOperand(3).getReg();
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
+ .addImm(0)
+ .addReg(Fs)
+ .addImm(Mips::sub_lo);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd)
+ .addReg(Wd_in)
+ .addImm(Lane)
+ .addReg(Wt);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the INSERT_FD pseudo instruction.
+//
+// insert_fd_pseudo $wd, $wd_in, $n, $fs
+// =>
+// subreg_to_reg $wt:sub_64, $fs
+// insve_d $wd[$n], $wd_in, $wt[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Wd_in = MI->getOperand(1).getReg();
+ unsigned Lane = MI->getOperand(2).getImm();
+ unsigned Fs = MI->getOperand(3).getReg();
+ unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
+ .addImm(0)
+ .addReg(Fs)
+ .addImm(Mips::sub_64);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd)
+ .addReg(Wd_in)
+ .addImm(Lane)
+ .addReg(Wt);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FILL_FW pseudo instruction.
+//
+// fill_fw_pseudo $wd, $fs
+// =>
+// implicit_def $wt1
+// insert_subreg $wt2:subreg_lo, $wt1, $fs
+// splati.w $wd, $wt2[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Fs = MI->getOperand(1).getReg();
+ unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
+ .addReg(Wt1)
+ .addReg(Fs)
+ .addImm(Mips::sub_lo);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wd).addReg(Wt2).addImm(0);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FILL_FD pseudo instruction.
+//
+// fill_fd_pseudo $wd, $fs
+// =>
+// implicit_def $wt1
+// insert_subreg $wt2:subreg_64, $wt1, $fs
+// splati.d $wd, $wt2[0]
+MachineBasicBlock *
+MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ assert(Subtarget->isFP64bit());
+
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned Wd = MI->getOperand(0).getReg();
+ unsigned Fs = MI->getOperand(1).getReg();
+ unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+ unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
+ .addReg(Wt1)
+ .addReg(Fs)
+ .addImm(Mips::sub_64);
+ BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wd).addReg(Wt2).addImm(0);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FEXP2_W_1 pseudo instructions.
+//
+// fexp2_w_1_pseudo $wd, $wt
+// =>
+// ldi.w $ws1, 1
+// ffint_u.w $ws2, $ws1
+// fexp2.w $wd, $ws2, $wt
+MachineBasicBlock *
+MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
+ unsigned Ws1 = RegInfo.createVirtualRegister(RC);
+ unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ DebugLoc DL = MI->getDebugLoc();
+
+ // Splat 1.0 into a vector
+ BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1);
+
+ // Emit 1.0 * fexp2(Wt)
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI->getOperand(0).getReg())
+ .addReg(Ws2)
+ .addReg(MI->getOperand(1).getReg());
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+// Emit the FEXP2_D_1 pseudo instructions.
+//
+// fexp2_d_1_pseudo $wd, $wt
+// =>
+// ldi.d $ws1, 1
+// ffint_u.d $ws2, $ws1
+// fexp2.d $wd, $ws2, $wt
+MachineBasicBlock *
+MipsSETargetLowering::emitFEXP2_D_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
+ unsigned Ws1 = RegInfo.createVirtualRegister(RC);
+ unsigned Ws2 = RegInfo.createVirtualRegister(RC);
+ DebugLoc DL = MI->getDebugLoc();
+
+ // Splat 1.0 into a vector
+ BuildMI(*BB, MI, DL, TII->get(Mips::LDI_D), Ws1).addImm(1);
+ BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_D), Ws2).addReg(Ws1);
+
+ // Emit 1.0 * fexp2(Wt)
+ BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI->getOperand(0).getReg())
+ .addReg(Ws2)
+ .addReg(MI->getOperand(1).getReg());
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
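Numerically, the two expansions above materialise 1.0 with the ldi/ffint_u pair and then multiply it by 2 raised to each element of $wt, assuming fexp2.[wd] computes ws * 2^wt as the earlier mips_fexp2 lowering does. A per-lane scalar sketch (illustrative only):

    #include <cmath>

    // Per-lane value produced by fexp2_w_1_pseudo / fexp2_d_1_pseudo:
    // e.g. wt == 3.0 gives 8.0, wt == -1.0 gives 0.5.
    float  fexp2_w_1(float wt)  { return 1.0f * std::exp2(wt); }
    double fexp2_d_1(double wt) { return 1.0  * std::exp2(wt); }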
diff --git a/lib/Target/Mips/MipsSEISelLowering.h b/lib/Target/Mips/MipsSEISelLowering.h
index ec8a5c73f1a4..c5210d94b34d 100644
--- a/lib/Target/Mips/MipsSEISelLowering.h
+++ b/lib/Target/Mips/MipsSEISelLowering.h
@@ -22,6 +22,14 @@ namespace llvm {
public:
explicit MipsSETargetLowering(MipsTargetMachine &TM);
+ /// \brief Enable MSA support for the given integer type and Register
+ /// class.
+ void addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC);
+ /// \brief Enable MSA support for the given floating-point type and
+ /// Register class.
+ void addMSAFloatType(MVT::SimpleValueType Ty,
+ const TargetRegisterClass *RC);
+
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -38,8 +46,8 @@ namespace llvm {
virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
if (VT == MVT::Untyped)
- return Subtarget->hasDSP() ? &Mips::ACRegsDSPRegClass :
- &Mips::ACRegsRegClass;
+ return Subtarget->hasDSP() ? &Mips::ACC64DSPRegClass :
+ &Mips::ACC64RegClass;
return TargetLowering::getRepRegClassFor(VT);
}
@@ -56,14 +64,50 @@ namespace llvm {
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const;
+ SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
+
SDValue lowerMulDiv(SDValue Op, unsigned NewOpc, bool HasLo, bool HasHi,
SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ /// \brief Lower VECTOR_SHUFFLE into one of a number of instructions
+ /// depending on the indices in the shuffle.
+ SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
MachineBasicBlock *emitBPOSGE32(MachineInstr *MI,
MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitMSACBranchPseudo(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned BranchOp) const;
+ /// \brief Emit the COPY_FW pseudo instruction
+ MachineBasicBlock *emitCOPY_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the COPY_FD pseudo instruction
+ MachineBasicBlock *emitCOPY_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the INSERT_FW pseudo instruction
+ MachineBasicBlock *emitINSERT_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the INSERT_FD pseudo instruction
+ MachineBasicBlock *emitINSERT_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FILL_FW pseudo instruction
+ MachineBasicBlock *emitFILL_FW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FILL_FD pseudo instruction
+ MachineBasicBlock *emitFILL_FD(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FEXP2_W_1 pseudo instructions.
+ MachineBasicBlock *emitFEXP2_W_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ /// \brief Emit the FEXP2_D_1 pseudo instructions.
+ MachineBasicBlock *emitFEXP2_D_1(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
};
}
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index a0768e51c079..02931a3e39ee 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -26,7 +27,7 @@ using namespace llvm;
MipsSEInstrInfo::MipsSEInstrInfo(MipsTargetMachine &tm)
: MipsInstrInfo(tm,
tm.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J),
- RI(*tm.getSubtargetImpl(), *this),
+ RI(*tm.getSubtargetImpl()),
IsN64(tm.getSubtarget<MipsSubtarget>().isABI_N64()) {}
const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
@@ -43,10 +44,8 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
- if ((Opc == Mips::LW) || (Opc == Mips::LW_P8) || (Opc == Mips::LD) ||
- (Opc == Mips::LD_P8) || (Opc == Mips::LWC1) || (Opc == Mips::LWC1_P8) ||
- (Opc == Mips::LDC1) || (Opc == Mips::LDC164) ||
- (Opc == Mips::LDC164_P8)) {
+ if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
+ (Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@@ -68,10 +67,8 @@ isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
- if ((Opc == Mips::SW) || (Opc == Mips::SW_P8) || (Opc == Mips::SD) ||
- (Opc == Mips::SD_P8) || (Opc == Mips::SWC1) || (Opc == Mips::SWC1_P8) ||
- (Opc == Mips::SDC1) || (Opc == Mips::SDC164) ||
- (Opc == Mips::SDC164_P8)) {
+ if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
+ (Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@@ -88,39 +85,41 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
bool KillSrc) const {
unsigned Opc = 0, ZeroReg = 0;
- if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
- if (Mips::CPURegsRegClass.contains(SrcReg))
- Opc = Mips::OR, ZeroReg = Mips::ZERO;
+ if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
+ if (Mips::GPR32RegClass.contains(SrcReg))
+ Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
else if (Mips::CCRRegClass.contains(SrcReg))
Opc = Mips::CFC1;
else if (Mips::FGR32RegClass.contains(SrcReg))
Opc = Mips::MFC1;
- else if (Mips::HIRegsRegClass.contains(SrcReg))
+ else if (Mips::HI32RegClass.contains(SrcReg))
Opc = Mips::MFHI, SrcReg = 0;
- else if (Mips::LORegsRegClass.contains(SrcReg))
+ else if (Mips::LO32RegClass.contains(SrcReg))
Opc = Mips::MFLO, SrcReg = 0;
- else if (Mips::HIRegsDSPRegClass.contains(SrcReg))
+ else if (Mips::HI32DSPRegClass.contains(SrcReg))
Opc = Mips::MFHI_DSP;
- else if (Mips::LORegsDSPRegClass.contains(SrcReg))
+ else if (Mips::LO32DSPRegClass.contains(SrcReg))
Opc = Mips::MFLO_DSP;
else if (Mips::DSPCCRegClass.contains(SrcReg)) {
BuildMI(MBB, I, DL, get(Mips::RDDSP), DestReg).addImm(1 << 4)
.addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
return;
}
+ else if (Mips::MSACtrlRegClass.contains(SrcReg))
+ Opc = Mips::CFCMSA;
}
- else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg.
+ else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg.
if (Mips::CCRRegClass.contains(DestReg))
Opc = Mips::CTC1;
else if (Mips::FGR32RegClass.contains(DestReg))
Opc = Mips::MTC1;
- else if (Mips::HIRegsRegClass.contains(DestReg))
+ else if (Mips::HI32RegClass.contains(DestReg))
Opc = Mips::MTHI, DestReg = 0;
- else if (Mips::LORegsRegClass.contains(DestReg))
+ else if (Mips::LO32RegClass.contains(DestReg))
Opc = Mips::MTLO, DestReg = 0;
- else if (Mips::HIRegsDSPRegClass.contains(DestReg))
+ else if (Mips::HI32DSPRegClass.contains(DestReg))
Opc = Mips::MTHI_DSP;
- else if (Mips::LORegsDSPRegClass.contains(DestReg))
+ else if (Mips::LO32DSPRegClass.contains(DestReg))
Opc = Mips::MTLO_DSP;
else if (Mips::DSPCCRegClass.contains(DestReg)) {
BuildMI(MBB, I, DL, get(Mips::WRDSP))
@@ -128,6 +127,8 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
.addReg(DestReg, RegState::ImplicitDefine);
return;
}
+ else if (Mips::MSACtrlRegClass.contains(DestReg))
+ Opc = Mips::CTCMSA;
}
else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_S;
@@ -135,26 +136,28 @@ void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
Opc = Mips::FMOV_D32;
else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
Opc = Mips::FMOV_D64;
- else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
- Opc = Mips::MOVCCRToCCR;
- else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
- if (Mips::CPU64RegsRegClass.contains(SrcReg))
- Opc = Mips::OR64, ZeroReg = Mips::ZERO_64;
- else if (Mips::HIRegs64RegClass.contains(SrcReg))
+ else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
+ if (Mips::GPR64RegClass.contains(SrcReg))
+ Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
+ else if (Mips::HI64RegClass.contains(SrcReg))
Opc = Mips::MFHI64, SrcReg = 0;
- else if (Mips::LORegs64RegClass.contains(SrcReg))
+ else if (Mips::LO64RegClass.contains(SrcReg))
Opc = Mips::MFLO64, SrcReg = 0;
else if (Mips::FGR64RegClass.contains(SrcReg))
Opc = Mips::DMFC1;
}
- else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
- if (Mips::HIRegs64RegClass.contains(DestReg))
+ else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
+ if (Mips::HI64RegClass.contains(DestReg))
Opc = Mips::MTHI64, DestReg = 0;
- else if (Mips::LORegs64RegClass.contains(DestReg))
+ else if (Mips::LO64RegClass.contains(DestReg))
Opc = Mips::MTLO64, DestReg = 0;
else if (Mips::FGR64RegClass.contains(DestReg))
Opc = Mips::DMTC1;
}
+ else if (Mips::MSA128BRegClass.contains(DestReg)) { // Copy to MSA reg
+ if (Mips::MSA128BRegClass.contains(SrcReg))
+ Opc = Mips::MOVE_V;
+ }
assert(Opc && "Cannot copy registers");
@@ -181,24 +184,32 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc = 0;
- if (Mips::CPURegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
- else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
- else if (Mips::ACRegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC64_P8 : Mips::STORE_AC64;
- else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC_DSP_P8 : Mips::STORE_AC_DSP;
- else if (Mips::ACRegs128RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_AC128_P8 : Mips::STORE_AC128;
+ if (Mips::GPR32RegClass.hasSubClassEq(RC))
+ Opc = Mips::SW;
+ else if (Mips::GPR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::SD;
+ else if (Mips::ACC64RegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC64;
+ else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC64DSP;
+ else if (Mips::ACC128RegClass.hasSubClassEq(RC))
+ Opc = Mips::STORE_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::STORE_CCOND_DSP_P8 : Mips::STORE_CCOND_DSP;
+ Opc = Mips::STORE_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
+ Opc = Mips::SWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::SDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
+ Opc = Mips::SDC164;
+ else if (RC->hasType(MVT::v16i8))
+ Opc = Mips::ST_B;
+ else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
+ Opc = Mips::ST_H;
+ else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
+ Opc = Mips::ST_W;
+ else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
+ Opc = Mips::ST_D;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
@@ -214,24 +225,32 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
- if (Mips::CPURegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
- else if (Mips::CPU64RegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
- else if (Mips::ACRegsRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC64_P8 : Mips::LOAD_AC64;
- else if (Mips::ACRegsDSPRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC_DSP_P8 : Mips::LOAD_AC_DSP;
- else if (Mips::ACRegs128RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_AC128_P8 : Mips::LOAD_AC128;
+ if (Mips::GPR32RegClass.hasSubClassEq(RC))
+ Opc = Mips::LW;
+ else if (Mips::GPR64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LD;
+ else if (Mips::ACC64RegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC64;
+ else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC64DSP;
+ else if (Mips::ACC128RegClass.hasSubClassEq(RC))
+ Opc = Mips::LOAD_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LOAD_CCOND_DSP_P8 : Mips::LOAD_CCOND_DSP;
+ Opc = Mips::LOAD_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
+ Opc = Mips::LWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
- Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
+ Opc = Mips::LDC164;
+ else if (RC->hasType(MVT::v16i8))
+ Opc = Mips::LD_B;
+ else if (RC->hasType(MVT::v8i16) || RC->hasType(MVT::v8f16))
+ Opc = Mips::LD_H;
+ else if (RC->hasType(MVT::v4i32) || RC->hasType(MVT::v4f32))
+ Opc = Mips::LD_W;
+ else if (RC->hasType(MVT::v2i64) || RC->hasType(MVT::v2f64))
+ Opc = Mips::LD_D;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)
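The MSA spill and reload paths added above pick the store/load opcode from the vector type carried by the register class: 8-bit elements map to ST_B/LD_B, 16-bit to ST_H/LD_H, 32-bit to ST_W/LD_W, and 64-bit to ST_D/LD_D. A minimal standalone sketch of that mapping, in plain C++ rather than the MVT/TargetRegisterClass API, with everything except the MSA suffixes being illustrative:

#include <cstdio>

// Maps the element width of a 128-bit MSA vector to the load/store suffix
// chosen above: B = byte, H = halfword, W = word, D = doubleword.
static char msaLoadStoreSuffix(unsigned ElementBits) {
  switch (ElementBits) {
  case 8:  return 'B'; // v16i8        -> LD_B / ST_B
  case 16: return 'H'; // v8i16, v8f16 -> LD_H / ST_H
  case 32: return 'W'; // v4i32, v4f32 -> LD_W / ST_W
  case 64: return 'D'; // v2i64, v2f64 -> LD_D / ST_D
  default: return '?';
  }
}

int main() {
  std::printf("v4f32 spill uses ST_%c\n", msaLoadStoreSuffix(32));
  std::printf("v2i64 reload uses LD_%c\n", msaLoadStoreSuffix(64));
  return 0;
}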
@@ -245,17 +264,59 @@ bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
default:
return false;
case Mips::RetRA:
- ExpandRetRA(MBB, MI, Mips::RET);
+ expandRetRA(MBB, MI, Mips::RET);
+ break;
+ case Mips::PseudoMFHI:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFHI);
+ break;
+ case Mips::PseudoMFLO:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFLO);
+ break;
+ case Mips::PseudoMFHI64:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
+ break;
+ case Mips::PseudoMFLO64:
+ expandPseudoMFHiLo(MBB, MI, Mips::MFLO64);
+ break;
+ case Mips::PseudoMTLOHI:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO, Mips::MTHI, false);
+ break;
+ case Mips::PseudoMTLOHI64:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO64, Mips::MTHI64, false);
+ break;
+ case Mips::PseudoMTLOHI_DSP:
+ expandPseudoMTLoHi(MBB, MI, Mips::MTLO_DSP, Mips::MTHI_DSP, true);
+ break;
+ case Mips::PseudoCVT_S_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
+ break;
+ case Mips::PseudoCVT_D32_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D32_W, Mips::MTC1, false);
+ break;
+ case Mips::PseudoCVT_S_L:
+ expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
+ break;
+ case Mips::PseudoCVT_D64_W:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D64_W, Mips::MTC1, true);
+ break;
+ case Mips::PseudoCVT_D64_L:
+ expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
break;
case Mips::BuildPairF64:
- ExpandBuildPairF64(MBB, MI);
+ expandBuildPairF64(MBB, MI, false);
+ break;
+ case Mips::BuildPairF64_64:
+ expandBuildPairF64(MBB, MI, true);
break;
case Mips::ExtractElementF64:
- ExpandExtractElementF64(MBB, MI);
+ expandExtractElementF64(MBB, MI, false);
+ break;
+ case Mips::ExtractElementF64_64:
+ expandExtractElementF64(MBB, MI, true);
break;
case Mips::MIPSeh_return32:
case Mips::MIPSeh_return64:
- ExpandEhReturn(MBB, MI);
+ expandEhReturn(MBB, MI);
break;
}
@@ -263,9 +324,9 @@ bool MipsSEInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return true;
}
-/// GetOppositeBranchOpc - Return the inverse of the specified
+/// getOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
-unsigned MipsSEInstrInfo::GetOppositeBranchOpc(unsigned Opc) const {
+unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
switch (Opc) {
default: llvm_unreachable("Illegal opcode!");
case Mips::BEQ: return Mips::BNE;
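The doc comment above describes the inversion getOppositeBranchOpc performs, and only the BEQ case of the switch is visible in this hunk. A compact standalone illustration of the pairing for a few of the 32-bit branches listed in getAnalyzableBrOpc, using plain strings instead of the opcode enum (nothing here is the LLVM switch itself):

#include <cstdio>
#include <cstring>

// Each branch and its logical negation; looking up either name in a pair
// returns the other one, mirroring BEQ <-> BNE, BGTZ <-> BLEZ, BGEZ <-> BLTZ.
static const char *oppositeBranch(const char *Opc) {
  static const struct { const char *A, *B; } Pairs[] = {
      {"BEQ", "BNE"}, {"BGTZ", "BLEZ"}, {"BGEZ", "BLTZ"}};
  for (unsigned i = 0; i != sizeof(Pairs) / sizeof(Pairs[0]); ++i) {
    if (!std::strcmp(Opc, Pairs[i].A))
      return Pairs[i].B;
    if (!std::strcmp(Opc, Pairs[i].B))
      return Pairs[i].A;
  }
  return "<not analyzable>";
}

int main() {
  std::printf("BEQ  -> %s\n", oppositeBranch("BEQ"));  // BNE
  std::printf("BLEZ -> %s\n", oppositeBranch("BLEZ")); // BGTZ
  return 0;
}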
@@ -315,7 +376,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
const TargetRegisterClass *RC = STI.isABI_N64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass;
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
bool LastInstrIsADDiu = NewImm;
const MipsAnalyzeImmediate::InstSeq &Seq =
@@ -346,7 +407,7 @@ MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
return Reg;
}
-unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
+unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ ||
Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ ||
Opc == Mips::BEQ64 || Opc == Mips::BNE64 || Opc == Mips::BGTZ64 ||
@@ -356,51 +417,134 @@ unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const {
Opc : 0;
}
-void MipsSEInstrInfo::ExpandRetRA(MachineBasicBlock &MBB,
+void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned Opc) const {
BuildMI(MBB, I, I->getDebugLoc(), get(Opc)).addReg(Mips::RA);
}
-void MipsSEInstrInfo::ExpandExtractElementF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+std::pair<bool, bool>
+MipsSEInstrInfo::compareOpndSize(unsigned Opc,
+ const MachineFunction &MF) const {
+ const MCInstrDesc &Desc = get(Opc);
+ assert(Desc.NumOperands == 2 && "Unary instruction expected.");
+ const MipsRegisterInfo *RI = &getRegisterInfo();
+ unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
+ unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();
+
+ return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
+}
+
+void MipsSEInstrInfo::expandPseudoMFHiLo(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned NewOpc) const {
+ BuildMI(MBB, I, I->getDebugLoc(), get(NewOpc), I->getOperand(0).getReg());
+}
+
+void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned LoOpc,
+ unsigned HiOpc,
+ bool HasExplicitDef) const {
+ // Expand
+ // lo_hi pseudomtlohi $gpr0, $gpr1
+ // to these two instructions:
+ // mtlo $gpr0
+ // mthi $gpr1
+
+ DebugLoc DL = I->getDebugLoc();
+ const MachineOperand &SrcLo = I->getOperand(1), &SrcHi = I->getOperand(2);
+ MachineInstrBuilder LoInst = BuildMI(MBB, I, DL, get(LoOpc));
+ MachineInstrBuilder HiInst = BuildMI(MBB, I, DL, get(HiOpc));
+ LoInst.addReg(SrcLo.getReg(), getKillRegState(SrcLo.isKill()));
+ HiInst.addReg(SrcHi.getReg(), getKillRegState(SrcHi.isKill()));
+
+ // Add lo/hi registers if the mtlo/hi instructions created have explicit
+ // def registers.
+ if (HasExplicitDef) {
+ unsigned DstReg = I->getOperand(0).getReg();
+ unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+ unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
+ LoInst.addReg(DstLo, RegState::Define);
+ HiInst.addReg(DstHi, RegState::Define);
+ }
+}
+
+void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned CvtOpc, unsigned MovOpc,
+ bool IsI64) const {
+ const MCInstrDesc &CvtDesc = get(CvtOpc), &MovDesc = get(MovOpc);
+ const MachineOperand &Dst = I->getOperand(0), &Src = I->getOperand(1);
+ unsigned DstReg = Dst.getReg(), SrcReg = Src.getReg(), TmpReg = DstReg;
+ unsigned KillSrc = getKillRegState(Src.isKill());
+ DebugLoc DL = I->getDebugLoc();
+ bool DstIsLarger, SrcIsLarger;
+
+ tie(DstIsLarger, SrcIsLarger) = compareOpndSize(CvtOpc, *MBB.getParent());
+
+ if (DstIsLarger)
+ TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+
+ if (SrcIsLarger)
+ DstReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
+
+ BuildMI(MBB, I, DL, MovDesc, TmpReg).addReg(SrcReg, KillSrc);
+ BuildMI(MBB, I, DL, CvtDesc, DstReg).addReg(TmpReg, RegState::Kill);
+}
+
+void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
unsigned DstReg = I->getOperand(0).getReg();
unsigned SrcReg = I->getOperand(1).getReg();
unsigned N = I->getOperand(2).getImm();
- const MCInstrDesc& Mfc1Tdd = get(Mips::MFC1);
DebugLoc dl = I->getDebugLoc();
assert(N < 2 && "Invalid immediate");
- unsigned SubIdx = N ? Mips::sub_fpodd : Mips::sub_fpeven;
+ unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
- BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(SubReg);
+ if (SubIdx == Mips::sub_hi && FP64)
+ BuildMI(MBB, I, dl, get(Mips::MFHC1), DstReg).addReg(SubReg);
+ else
+ BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
}
-void MipsSEInstrInfo::ExpandBuildPairF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
+void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ bool FP64) const {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
DebugLoc dl = I->getDebugLoc();
const TargetRegisterInfo &TRI = getRegisterInfo();
- // mtc1 Lo, $fp
- // mtc1 Hi, $fp + 1
- BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpeven))
+ // For FP32 mode:
+ // mtc1 Lo, $fp
+ // mtc1 Hi, $fp + 1
+ // For FP64 mode:
+ // mtc1 Lo, $fp
+ // mthc1 Hi, $fp
+
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
.addReg(LoReg);
- BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_fpodd))
- .addReg(HiReg);
+
+ if (FP64)
+ BuildMI(MBB, I, dl, get(Mips::MTHC1), TRI.getSubReg(DstReg, Mips::sub_hi))
+ .addReg(HiReg);
+ else
+ BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
+ .addReg(HiReg);
}
-void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB,
+void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
// This pseudo instruction is generated as part of the lowering of
// ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
// indirect jump to TargetReg
const MipsSubtarget &STI = TM.getSubtarget<MipsSubtarget>();
unsigned ADDU = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned OR = STI.isABI_N64() ? Mips::OR64 : Mips::OR;
unsigned JR = STI.isABI_N64() ? Mips::JR64 : Mips::JR;
unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP;
unsigned RA = STI.isABI_N64() ? Mips::RA_64 : Mips::RA;
@@ -409,13 +553,13 @@ void MipsSEInstrInfo::ExpandEhReturn(MachineBasicBlock &MBB,
unsigned OffsetReg = I->getOperand(0).getReg();
unsigned TargetReg = I->getOperand(1).getReg();
- // or $ra, $v0, $zero
+ // addu $ra, $v0, $zero
// addu $sp, $sp, $v1
// jr $ra
if (TM.getRelocationModel() == Reloc::PIC_)
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), T9)
+ BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), T9)
.addReg(TargetReg).addReg(ZERO);
- BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(OR), RA)
+ BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), RA)
.addReg(TargetReg).addReg(ZERO);
BuildMI(MBB, I, I->getDebugLoc(), TM.getInstrInfo()->get(ADDU), SP)
.addReg(SP).addReg(OffsetReg);
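The comments in expandPseudoMTLoHi and expandBuildPairF64 above spell out the emitted instruction pairs. A standalone sketch of the one decision expandBuildPairF64 makes, namely which instruction and which half of the destination receives the upper 32 bits, written in plain C++ with hypothetical register names rather than the MachineInstr builders used above:

#include <cstdio>

// The low half is always moved with MTC1 into sub_lo; the high half goes
// through MTHC1 into the upper word of the same 64-bit FPR when FP64 is
// enabled, or through MTC1 into the odd register of the even/odd pair
// (sub_hi) otherwise.
static void buildPairF64(bool FP64) {
  std::printf("  mtc1  $lo_gpr, $f0   # low 32 bits -> sub_lo\n");
  if (FP64)
    std::printf("  mthc1 $hi_gpr, $f0   # high 32 bits -> upper word of $f0\n");
  else
    std::printf("  mtc1  $hi_gpr, $f1   # high 32 bits -> odd register of the pair\n");
}

int main() {
  std::printf("FP32 mode:\n");
  buildPairF64(false);
  std::printf("FP64 mode:\n");
  buildPairF64(true);
  return 0;
}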
diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h
index 0bf7876f0fe0..6d2dd901f33b 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/lib/Target/Mips/MipsSEInstrInfo.h
@@ -65,7 +65,7 @@ public:
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
- virtual unsigned GetOppositeBranchOpc(unsigned Opc) const;
+ virtual unsigned getOppositeBranchOpc(unsigned Opc) const;
/// Adjust SP by Amount bytes.
void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB,
@@ -79,15 +79,39 @@ public:
unsigned *NewImm) const;
private:
- virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const;
+ virtual unsigned getAnalyzableBrOpc(unsigned Opc) const;
- void ExpandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ void expandRetRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc) const;
- void ExpandExtractElementF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
- void ExpandBuildPairF64(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
- void ExpandEhReturn(MachineBasicBlock &MBB,
+
+ std::pair<bool, bool> compareOpndSize(unsigned Opc,
+ const MachineFunction &MF) const;
+
+ void expandPseudoMFHiLo(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned NewOpc) const;
+
+ void expandPseudoMTLoHi(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned LoOpc, unsigned HiOpc,
+ bool HasExplicitDef) const;
+
+ /// Expand pseudo Int-to-FP conversion instructions.
+ ///
+ /// For example, the following pseudo instruction
+ /// PseudoCVT_D32_W D2, A5
+ /// gets expanded into these two instructions:
+ /// MTC1 F4, A5
+ /// CVT_D32_W D2, F4
+ ///
+ /// We do this expansion post-RA to avoid inserting a floating point copy
+ /// instruction between MTC1 and CVT_D32_W.
+ void expandCvtFPInt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned CvtOpc, unsigned MovOpc, bool IsI64) const;
+
+ void expandExtractElementF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
+ void expandBuildPairF64(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, bool FP64) const;
+ void expandEhReturn(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const;
};
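The doc comment for expandCvtFPInt above already gives the PseudoCVT_D32_W example. What steers the expansion is compareOpndSize: whichever side of the conversion uses the larger register class determines whether the move or the convert touches only the low sub-register. A minimal standalone sketch under the assumption that register-class sizes are simply bytes (the real code queries the register classes of the conversion opcode):

#include <cstdio>
#include <utility>

// Returns (dst larger, src larger) for a unary conversion whose destination
// and source register classes have the given sizes in bytes.
static std::pair<bool, bool> compareOpndSize(unsigned DstBytes,
                                             unsigned SrcBytes) {
  return std::make_pair(DstBytes > SrcBytes, DstBytes < SrcBytes);
}

int main() {
  // CVT_D32_W: 8-byte FP destination class, 4-byte source class, so the
  // integer move (MTC1) writes only the low sub-register of the destination
  // and the conversion widens it into the full register.
  std::pair<bool, bool> P = compareOpndSize(8, 4);
  std::printf("CVT_D32_W: dst larger = %d, src larger = %d\n", P.first, P.second);

  // CVT_S_L: 4-byte destination class, 8-byte source class, so the 64-bit
  // move (DMTC1) writes the full pseudo destination and the single-precision
  // result lands in its low sub-register.
  P = compareOpndSize(4, 8);
  std::printf("CVT_S_L:   dst larger = %d, src larger = %d\n", P.first, P.second);
  return 0;
}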
diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp
index 96967380b29d..2d440840aaff 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -40,9 +40,8 @@
using namespace llvm;
-MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST,
- const MipsSEInstrInfo &I)
- : MipsRegisterInfo(ST), TII(I) {}
+MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST)
+ : MipsRegisterInfo(ST) {}
bool MipsSERegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
@@ -57,10 +56,28 @@ requiresFrameIndexScavenging(const MachineFunction &MF) const {
const TargetRegisterClass *
MipsSERegisterInfo::intRegClass(unsigned Size) const {
if (Size == 4)
- return &Mips::CPURegsRegClass;
+ return &Mips::GPR32RegClass;
assert(Size == 8);
- return &Mips::CPU64RegsRegClass;
+ return &Mips::GPR64RegClass;
+}
+
+/// Determine whether a given opcode is an MSA load/store (supporting 10-bit
+/// offsets) or a non-MSA load/store (supporting 16-bit offsets).
+static inline bool isMSALoadOrStore(const unsigned Opcode) {
+ switch (Opcode) {
+ case Mips::LD_B:
+ case Mips::LD_H:
+ case Mips::LD_W:
+ case Mips::LD_D:
+ case Mips::ST_B:
+ case Mips::ST_H:
+ case Mips::ST_W:
+ case Mips::ST_D:
+ return true;
+ default:
+ return false;
+ }
}
void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
@@ -112,21 +129,49 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II,
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
- // If MI is not a debug value, make sure Offset fits in the 16-bit immediate
- // field.
- if (!MI.isDebugValue() && !isInt<16>(Offset)) {
- MachineBasicBlock &MBB = *MI.getParent();
- DebugLoc DL = II->getDebugLoc();
- unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
- unsigned NewImm;
-
- unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL, &NewImm);
- BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
- .addReg(Reg, RegState::Kill);
-
- FrameReg = Reg;
- Offset = SignExtend64<16>(NewImm);
- IsKill = true;
+ if (!MI.isDebugValue()) {
+ // Make sure Offset fits within the field available.
+ // For MSA instructions, this is a 10-bit signed immediate, otherwise it is
+ // a 16-bit signed immediate.
+ unsigned OffsetBitSize = isMSALoadOrStore(MI.getOpcode()) ? 10 : 16;
+
+ if (OffsetBitSize == 10 && !isInt<10>(Offset) && isInt<16>(Offset)) {
+      // If the offset needs to fit into a signed 10-bit immediate and does
+      // not, but does fit into a signed 16-bit immediate, then use an ADDiu.
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = II->getDebugLoc();
+ unsigned ADDiu = Subtarget.isABI_N64() ? Mips::DADDiu : Mips::ADDiu;
+ const TargetRegisterClass *RC =
+ Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
+ unsigned Reg = RegInfo.createVirtualRegister(RC);
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo *>(
+ MBB.getParent()->getTarget().getInstrInfo());
+ BuildMI(MBB, II, DL, TII.get(ADDiu), Reg).addReg(FrameReg).addImm(Offset);
+
+ FrameReg = Reg;
+ Offset = 0;
+ IsKill = true;
+ } else if (!isInt<16>(Offset)) {
+ // Otherwise split the offset into 16-bit pieces and add it in multiple
+ // instructions.
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = II->getDebugLoc();
+ unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
+ unsigned NewImm = 0;
+ const MipsSEInstrInfo &TII =
+ *static_cast<const MipsSEInstrInfo *>(
+ MBB.getParent()->getTarget().getInstrInfo());
+ unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL,
+ OffsetBitSize == 16 ? &NewImm : NULL);
+ BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg)
+ .addReg(Reg, RegState::Kill);
+
+ FrameReg = Reg;
+ Offset = SignExtend64<16>(NewImm);
+ IsKill = true;
+ }
}
MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill);
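The new eliminateFI logic above distinguishes the 10-bit signed offset field of the MSA loads/stores from the 16-bit field of the ordinary ones, and rewrites out-of-range offsets in two different ways. A standalone sketch of the three cases, with the isInt<N> helper re-implemented in plain C++ (the names and printed descriptions are illustrative, not the pass itself):

#include <cstdint>
#include <cstdio>

// Re-implementation of the signed-immediate check (the real code uses
// llvm::isInt<N>); true when X fits in an N-bit two's complement immediate.
template <unsigned N> static bool fitsSignedBits(int64_t X) {
  return X >= -(int64_t(1) << (N - 1)) && X < (int64_t(1) << (N - 1));
}

// Mirrors the cases handled in eliminateFI for a frame-index offset: keep it,
// fold it with one ADDiu (10-bit MSA field, offset still fits 16 bits), or
// materialize it in a register and add it with ADDu/DADDu.
static const char *pickRewrite(int64_t Offset, bool IsMSALoadOrStore) {
  unsigned OffsetBitSize = IsMSALoadOrStore ? 10 : 16;
  if (fitsSignedBits<16>(Offset) &&
      (OffsetBitSize == 16 || fitsSignedBits<10>(Offset)))
    return "offset fits the immediate field: leave it in place";
  if (OffsetBitSize == 10 && fitsSignedBits<16>(Offset))
    return "10-bit field, 16-bit offset: fold it with a single ADDiu";
  return "offset too large: load it into a register and add with ADDu/DADDu";
}

int main() {
  std::printf("MSA,     offset 600:   %s\n", pickRewrite(600, true));
  std::printf("non-MSA, offset 600:   %s\n", pickRewrite(600, false));
  std::printf("non-MSA, offset 1<<20: %s\n", pickRewrite(1 << 20, false));
  return 0;
}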
diff --git a/lib/Target/Mips/MipsSERegisterInfo.h b/lib/Target/Mips/MipsSERegisterInfo.h
index 2f7c37bb460d..76cdd9d230d3 100644
--- a/lib/Target/Mips/MipsSERegisterInfo.h
+++ b/lib/Target/Mips/MipsSERegisterInfo.h
@@ -21,11 +21,8 @@ namespace llvm {
class MipsSEInstrInfo;
class MipsSERegisterInfo : public MipsRegisterInfo {
- const MipsSEInstrInfo &TII;
-
public:
- MipsSERegisterInfo(const MipsSubtarget &Subtarget,
- const MipsSEInstrInfo &TII);
+ MipsSERegisterInfo(const MipsSubtarget &Subtarget);
bool requiresRegisterScavenging(const MachineFunction &MF) const;
diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td
index 1add02ff83e9..2779064c4149 100644
--- a/lib/Target/Mips/MipsSchedule.td
+++ b/lib/Target/Mips/MipsSchedule.td
@@ -17,13 +17,18 @@ def IMULDIV : FuncUnit;
// Instruction Itinerary classes used for Mips
//===----------------------------------------------------------------------===//
def IIAlu : InstrItinClass;
+def IIArith : InstrItinClass;
+def IILogic : InstrItinClass;
def IILoad : InstrItinClass;
def IIStore : InstrItinClass;
def IIXfer : InstrItinClass;
def IIBranch : InstrItinClass;
def IIHiLo : InstrItinClass;
def IIImul : InstrItinClass;
+def IIImult : InstrItinClass;
def IIIdiv : InstrItinClass;
+def IIseb : InstrItinClass;
+def IIslt : InstrItinClass;
def IIFcvt : InstrItinClass;
def IIFmove : InstrItinClass;
def IIFcmp : InstrItinClass;
@@ -35,6 +40,9 @@ def IIFdivDouble : InstrItinClass;
def IIFsqrtSingle : InstrItinClass;
def IIFsqrtDouble : InstrItinClass;
def IIFrecipFsqrtStep : InstrItinClass;
+def IIFLoad : InstrItinClass;
+def IIFStore : InstrItinClass;
+def IIFmoveC1 : InstrItinClass;
def IIPseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
@@ -42,6 +50,8 @@ def IIPseudo : InstrItinClass;
//===----------------------------------------------------------------------===//
def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IIArith , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IILogic , [InstrStage<1, [ALU]>]>,
InstrItinData<IILoad , [InstrStage<3, [ALU]>]>,
InstrItinData<IIStore , [InstrStage<1, [ALU]>]>,
InstrItinData<IIXfer , [InstrStage<2, [ALU]>]>,
@@ -59,5 +69,8 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [
InstrItinData<IIFdivDouble , [InstrStage<36, [ALU]>]>,
InstrItinData<IIFsqrtSingle , [InstrStage<54, [ALU]>]>,
InstrItinData<IIFsqrtDouble , [InstrStage<12, [ALU]>]>,
- InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>
+ InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]>,
+ InstrItinData<IIFLoad , [InstrStage<3, [ALU]>]>,
+ InstrItinData<IIFStore , [InstrStage<1, [ALU]>]>,
+ InstrItinData<IIFmoveC1 , [InstrStage<2, [ALU]>]>
]>;
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 14a2b2779512..0a81072b0858 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -48,6 +48,17 @@ static cl::opt<bool> Mips_Os16(
"floating point as Mips 16"),
cl::Hidden);
+static cl::opt<bool>
+Mips16HardFloat("mips16-hard-float", cl::NotHidden,
+ cl::desc("MIPS: mips16 hard float enable."),
+ cl::init(false));
+
+static cl::opt<bool>
+Mips16ConstantIslands(
+ "mips16-constant-islands", cl::Hidden,
+ cl::desc("MIPS: mips16 constant islands enable. experimental feature"),
+ cl::init(false));
+
void MipsSubtarget::anchor() { }
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
@@ -58,8 +69,9 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasSwap(false),
HasBitCount(false), HasFPIdx(false),
- InMips16Mode(false), InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
- AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
+ InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
+ InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
+ AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16), HasMSA(false),
RM(_RM), OverrideMode(NoOverride), TM(_TM)
{
std::string CPUName = CPU;
@@ -83,12 +95,20 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
(hasMips64() && (isABI_N32() || isABI_N64()))) &&
"Invalid Arch & ABI pair.");
+ if (hasMSA() && !isFP64bit())
+ report_fatal_error("MSA requires a 64-bit FPU register file (FR=1 mode). "
+ "See -mattr=+fp64.",
+ false);
+
// Is the target system Linux ?
if (TT.find("linux") == std::string::npos)
IsLinux = false;
// Set UseSmallSection.
UseSmallSection = !IsLinux && (RM == Reloc::Static);
+  // Set some subtarget-specific features.
+  if (inMips16Mode())
+    HasBitCount = false;
}
bool
@@ -98,7 +118,7 @@ MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel,
Mode = TargetSubtargetInfo::ANTIDEP_NONE;
CriticalPathRCs.clear();
CriticalPathRCs.push_back(hasMips64() ?
- &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass);
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass);
return OptLevel >= CodeGenOpt::Aggressive;
}
@@ -146,3 +166,11 @@ void MipsSubtarget::resetSubtarget(MachineFunction *MF) {
}
}
+bool MipsSubtarget::mipsSEUsesSoftFloat() const {
+ return TM->Options.UseSoftFloat && !InMips16HardFloat;
+}
+
+bool MipsSubtarget::useConstantIslands() {
+ DEBUG(dbgs() << "use constant islands " << Mips16ConstantIslands << "\n");
+ return Mips16ConstantIslands;
+}
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index f2f0e15887e4..6b2ab1238b87 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -93,6 +93,9 @@ protected:
// InMips16 -- can process Mips16 instructions
bool InMips16Mode;
+ // Mips16 hard float
+ bool InMips16HardFloat;
+
// PreviousInMips16 -- the function we just processed was in Mips 16 Mode
bool PreviousInMips16Mode;
@@ -110,6 +113,9 @@ protected:
// compiled as Mips32
bool Os16;
+ // HasMSA -- supports MSA ASE.
+ bool HasMSA;
+
InstrItineraryData InstrItins;
// The instance to the register info section object
@@ -154,6 +160,7 @@ public:
bool isLittle() const { return IsLittle; }
bool isFP64bit() const { return IsFP64bit; }
+ bool isNotFP64bit() const { return !IsFP64bit; }
bool isGP64bit() const { return IsGP64bit; }
bool isGP32bit() const { return !IsGP64bit; }
bool isSingleFloat() const { return IsSingleFloat; }
@@ -170,28 +177,48 @@ public:
}
llvm_unreachable("Unexpected mode");
}
- bool inMips16ModeDefault() {
+ bool inMips16ModeDefault() const {
return InMips16Mode;
}
+ bool inMips16HardFloat() const {
+ return inMips16Mode() && InMips16HardFloat;
+ }
bool inMicroMipsMode() const { return InMicroMipsMode; }
bool hasDSP() const { return HasDSP; }
bool hasDSPR2() const { return HasDSPR2; }
+ bool hasMSA() const { return HasMSA; }
bool isLinux() const { return IsLinux; }
bool useSmallSection() const { return UseSmallSection; }
bool hasStandardEncoding() const { return !inMips16Mode(); }
+ bool mipsSEUsesSoftFloat() const;
+
+ bool enableLongBranchPass() const {
+ return hasStandardEncoding() || allowMixed16_32();
+ }
+
/// Features related to the presence of specific instructions.
bool hasSEInReg() const { return HasSEInReg; }
bool hasCondMov() const { return HasCondMov; }
bool hasSwap() const { return HasSwap; }
bool hasBitCount() const { return HasBitCount; }
bool hasFPIdx() const { return HasFPIdx; }
+ bool hasExtractInsert() const { return !inMips16Mode() && hasMips32r2(); }
- bool allowMixed16_32() const { return AllowMixed16_32;};
+ const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
+ bool allowMixed16_32() const { return inMips16ModeDefault() |
+ AllowMixed16_32;}
bool os16() const { return Os16;};
+  // For now, constant islands are enabled for the whole compilation unit, but
+  // we only actually use them when we are also in MIPS16 mode.
+  //
+  static bool useConstantIslands();
+
+ unsigned stackAlignment() const { return hasMips64() ? 16 : 8; }
+
// Grab MipsRegInfo object
const MipsReginfo &getMReginfo() const { return MRI; }
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index ee28e2a122dd..5046c1b782f6 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -22,6 +22,7 @@
#include "MipsSEISelLowering.h"
#include "MipsSEISelDAGToDAG.h"
#include "Mips16FrameLowering.h"
+#include "Mips16HardFloat.h"
#include "Mips16InstrInfo.h"
#include "Mips16ISelDAGToDAG.h"
#include "Mips16ISelLowering.h"
@@ -31,6 +32,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -69,8 +71,9 @@ MipsTargetMachine(const Target &T, StringRef TT,
"E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32-S64")),
InstrInfo(MipsInstrInfo::create(*this)),
FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
- TLInfo(MipsTargetLowering::create(*this)),
- TSInfo(*this), JITInfo() {
+ TLInfo(MipsTargetLowering::create(*this)), TSInfo(*this),
+ InstrItins(Subtarget.getInstrItineraryData()), JITInfo() {
+ initAsmInfo();
}
@@ -132,7 +135,13 @@ namespace {
class MipsPassConfig : public TargetPassConfig {
public:
MipsPassConfig(MipsTargetMachine *TM, PassManagerBase &PM)
- : TargetPassConfig(TM, PM) {}
+ : TargetPassConfig(TM, PM) {
+ // The current implementation of long branch pass requires a scratch
+ // register ($at) to be available before branch instructions. Tail merging
+ // can break this requirement, so disable it when long branch pass is
+ // enabled.
+ EnableTailMerge = !getMipsSubtarget().enableLongBranchPass();
+ }
MipsTargetMachine &getMipsTargetMachine() const {
return getTM<MipsTargetMachine>();
@@ -156,6 +165,9 @@ void MipsPassConfig::addIRPasses() {
TargetPassConfig::addIRPasses();
if (getMipsSubtarget().os16())
addPass(createMipsOs16(getMipsTargetMachine()));
+ if (getMipsSubtarget().inMips16HardFloat())
+ addPass(createMips16HardFloat(getMipsTargetMachine()));
+ addPass(createPartiallyInlineLibCallsPass());
}
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
@@ -191,8 +203,7 @@ bool MipsPassConfig::addPreEmitPass() {
const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
addPass(createMipsDelaySlotFillerPass(TM));
- if (Subtarget.hasStandardEncoding() ||
- Subtarget.allowMixed16_32())
+ if (Subtarget.enableLongBranchPass())
addPass(createMipsLongBranchPass(TM));
if (Subtarget.inMips16Mode() ||
Subtarget.allowMixed16_32())
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index ee557084fbbf..5a9a11d861c0 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -44,6 +44,7 @@ class MipsTargetMachine : public LLVMTargetMachine {
OwningPtr<const MipsFrameLowering> FrameLoweringSE;
OwningPtr<const MipsTargetLowering> TLInfoSE;
MipsSelectionDAGInfo TSInfo;
+ const InstrItineraryData &InstrItins;
MipsJITInfo JITInfo;
public:
@@ -65,6 +66,11 @@ public:
{ return &Subtarget; }
virtual const DataLayout *getDataLayout() const
{ return &DL;}
+
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return Subtarget.inMips16Mode() ? 0 : &InstrItins;
+ }
+
virtual MipsJITInfo *getJITInfo()
{ return &JITInfo; }
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
new file mode 100644
index 000000000000..96966fd7cbc0
--- /dev/null
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -0,0 +1,44 @@
+//===-- MipsTargetStreamer.h - Mips Target Streamer ------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MIPSTARGETSTREAMER_H
+#define MIPSTARGETSTREAMER_H
+
+#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MipsTargetStreamer : public MCTargetStreamer {
+ virtual void anchor();
+
+public:
+ virtual void emitMipsHackELFFlags(unsigned Flags) = 0;
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val) = 0;
+};
+
+// This part is for ascii assembly output
+class MipsTargetAsmStreamer : public MipsTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ MipsTargetAsmStreamer(formatted_raw_ostream &OS);
+ virtual void emitMipsHackELFFlags(unsigned Flags);
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+};
+
+// This part is for ELF object output
+class MipsTargetELFStreamer : public MipsTargetStreamer {
+public:
+ MCELFStreamer &getStreamer();
+ virtual void emitMipsHackELFFlags(unsigned Flags);
+ virtual void emitMipsHackSTOCG(MCSymbol *Sym, unsigned Val);
+};
+}
+
+#endif
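The new MipsTargetStreamer.h above splits target-specific emission into an abstract interface with one textual-assembly implementation and one ELF implementation. A stripped-down standalone sketch of that shape in plain C++; the class names echo the header, but the method, the directive text, and the flag value are all illustrative and none of this is the LLVM API:

#include <cstdio>

// Abstract target streamer: callers program against this interface and do not
// care whether the output is textual assembly or an ELF object.
class TargetStreamerSketch {
public:
  virtual ~TargetStreamerSketch() {}
  virtual void emitELFFlags(unsigned Flags) = 0;
};

// Textual-assembly back end: prints a directive-like line.
class AsmStreamerSketch : public TargetStreamerSketch {
public:
  virtual void emitELFFlags(unsigned Flags) {
    std::printf("\t.hypothetical_elf_flags 0x%x\n", Flags);
  }
};

// Object back end: would record the flags in the ELF object instead.
class ELFStreamerSketch : public TargetStreamerSketch {
public:
  virtual void emitELFFlags(unsigned Flags) {
    std::printf("(recorded e_flags = 0x%x in the ELF object)\n", Flags);
  }
};

int main() {
  AsmStreamerSketch Asm;
  ELFStreamerSketch Obj;
  TargetStreamerSketch *TS = &Asm;
  TS->emitELFFlags(0x1005u); // same call site, text output
  TS = &Obj;
  TS->emitELFFlags(0x1005u); // same call site, object output
  return 0;
}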