aboutsummaryrefslogtreecommitdiff
path: root/lib/Target/Hexagon
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/Hexagon')
-rw-r--r--lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonBitSimplify.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonBitTracker.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonConstPropagation.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp14
-rw-r--r--lib/Target/Hexagon/HexagonGenPredicate.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp46
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp56
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h27
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp74
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h21
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonPatterns.td165
-rw-r--r--lib/Target/Hexagon/HexagonPseudo.td10
-rw-r--r--lib/Target/Hexagon/HexagonSplitDouble.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp10
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp12
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp7
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h13
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp2
20 files changed, 258 insertions, 209 deletions
diff --git a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index c19e636d79ca..d901abbd1692 100644
--- a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -1413,6 +1413,7 @@ int HexagonAsmParser::processInstruction(MCInst &Inst,
// Translate a "$Rx = CONST32(#imm)" to "$Rx = memw(gp+#LABEL) "
case Hexagon::CONST32:
is32bit = true;
+ LLVM_FALLTHROUGH;
// Translate a "$Rx:y = CONST64(#imm)" to "$Rx:y = memd(gp+#LABEL) "
case Hexagon::CONST64:
// FIXME: need better way to detect AsmStreamer (upstream removed getKind())
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 14c682c6df4b..b064778c4bbd 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1947,8 +1947,10 @@ bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
switch (Opc) {
case Hexagon::S2_storeri_io:
Align++;
+ LLVM_FALLTHROUGH;
case Hexagon::S2_storerh_io:
Align++;
+ LLVM_FALLTHROUGH;
case Hexagon::S2_storerb_io:
break;
default:
diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp
index 730026121d3b..3de531088240 100644
--- a/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -937,6 +937,7 @@ bool HexagonEvaluator::evaluate(const MachineInstr &BI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
+ LLVM_FALLTHROUGH;
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptpt:
case Hexagon::J2_jumptnew:
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index aa68f6cfdfc1..49ddd6961f8a 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -2244,6 +2244,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &BrI,
case Hexagon::J2_jumpfnew:
case Hexagon::J2_jumpfnewpt:
Negated = true;
+ LLVM_FALLTHROUGH;
case Hexagon::J2_jumpt:
case Hexagon::J2_jumptnew:
case Hexagon::J2_jumptnewpt:
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 97a53dcbaed7..c790579ccebc 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -979,18 +979,6 @@ bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
if (MFI.hasCalls() || HMFI.hasClobberLR())
return true;
- // Frame pointer elimination is a possiblility at this point, but
- // to know if FP is necessary we need to know if spill/restore
- // functions will be used (they require FP to be valid).
- // This means that hasFP shouldn't really be called before CSI is
- // calculated, and some measures are taken to make sure of that
- // (e.g. default implementations of virtual functions that call it
- // are overridden apropriately).
- assert(MFI.isCalleeSavedInfoValid() && "Need to know CSI");
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
- if (useSpillFunction(MF, CSI) || useRestoreFunction(MF, CSI))
- return true;
-
return false;
}
@@ -2437,6 +2425,8 @@ bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
const CSIVect &CSI) const {
if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
return true;
+ if (!hasFP(MF))
+ return true;
if (!isOptSize(MF) && !isMinSize(MF))
if (MF.getTarget().getOptLevel() > CodeGenOpt::Default)
return true;
diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp
index f14c733dcf51..3470480d607d 100644
--- a/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -334,6 +334,7 @@ bool HexagonGenPredicate::isScalarPred(Register PredReg) {
if (MRI->getRegClass(PR.R) != PredRC)
return false;
// If it is a copy between two predicate registers, fall through.
+ LLVM_FALLTHROUGH;
}
case Hexagon::C2_and:
case Hexagon::C2_andn:
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index e5f49ca77a91..0163b2e2bdc4 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -241,22 +241,31 @@ void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
case MVT::v32i16:
case MVT::v16i32:
case MVT::v8i64:
- if (isAlignedMemNode(LD))
- Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
- else
+ if (isAlignedMemNode(LD)) {
+ if (LD->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
+ }
break;
// 128B
case MVT::v128i8:
case MVT::v64i16:
case MVT::v32i32:
case MVT::v16i64:
- if (isAlignedMemNode(LD))
- Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
- : Hexagon::V6_vL32b_ai_128B;
- else
+ if (isAlignedMemNode(LD)) {
+ if (LD->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi_128B
+ : Hexagon::V6_vL32b_nt_ai_128B;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
+ : Hexagon::V6_vL32b_ai_128B;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi_128B
: Hexagon::V6_vL32Ub_ai_128B;
+ }
break;
default:
llvm_unreachable("Unexpected memory type in indexed load");
@@ -529,22 +538,31 @@ void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
case MVT::v32i16:
case MVT::v16i32:
case MVT::v8i64:
- if (isAlignedMemNode(ST))
- Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
- else
+ if (isAlignedMemNode(ST)) {
+ if (ST->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
+ }
break;
// 128B
case MVT::v128i8:
case MVT::v64i16:
case MVT::v32i32:
case MVT::v16i64:
- if (isAlignedMemNode(ST))
- Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
- : Hexagon::V6_vS32b_ai_128B;
- else
+ if (isAlignedMemNode(ST)) {
+ if (ST->isNonTemporal())
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi_128B
+ : Hexagon::V6_vS32b_nt_ai_128B;
+ else
+ Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
+ : Hexagon::V6_vS32b_ai_128B;
+ } else {
Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi_128B
: Hexagon::V6_vS32Ub_ai_128B;
+ }
break;
default:
llvm_unreachable("Unexpected memory type in indexed store");
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 2daacf795555..67242764d453 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -716,6 +716,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
auto PtrVT = getPointerTy(MF.getDataLayout());
// Check for varargs.
@@ -832,7 +833,6 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (NeedsArgAlign && Subtarget.hasV60TOps()) {
DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
- MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
// V6 vectors passed by value have 64 or 128 byte alignment depending
// on whether we are 64 byte vector mode or 128 byte.
bool UseHVXDbl = Subtarget.useHVXDblOps();
@@ -916,10 +916,15 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Ops.push_back(Glue);
if (IsTailCall) {
- MF.getFrameInfo().setHasTailCall();
+ MFI.setHasTailCall();
return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
}
+ // Set this here because we need to know this for "hasFP" in frame lowering.
+ // The target-independent code calls getFrameRegister before setting it, and
+ // getFrameRegister uses hasFP to determine whether the function has FP.
+ MFI.setHasCalls(true);
+
unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
Glue = Chain.getValue(1);
@@ -1284,11 +1289,9 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
// Creates a SPLAT instruction for a constant value VAL.
static SDValue createSplat(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
SDValue Val) {
- if (VT.getSimpleVT() == MVT::v4i8)
- return DAG.getNode(HexagonISD::VSPLATB, dl, VT, Val);
-
- if (VT.getSimpleVT() == MVT::v4i16)
- return DAG.getNode(HexagonISD::VSPLATH, dl, VT, Val);
+ EVT T = VT.getVectorElementType();
+ if (T == MVT::i8 || T == MVT::i16)
+ return DAG.getNode(HexagonISD::VSPLAT, dl, VT, Val);
return SDValue();
}
@@ -2296,32 +2299,13 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
case HexagonISD::JT: return "HexagonISD::JT";
case HexagonISD::PACKHL: return "HexagonISD::PACKHL";
case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
- case HexagonISD::SHUFFEB: return "HexagonISD::SHUFFEB";
- case HexagonISD::SHUFFEH: return "HexagonISD::SHUFFEH";
- case HexagonISD::SHUFFOB: return "HexagonISD::SHUFFOB";
- case HexagonISD::SHUFFOH: return "HexagonISD::SHUFFOH";
case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
- case HexagonISD::VCMPBEQ: return "HexagonISD::VCMPBEQ";
- case HexagonISD::VCMPBGT: return "HexagonISD::VCMPBGT";
- case HexagonISD::VCMPBGTU: return "HexagonISD::VCMPBGTU";
- case HexagonISD::VCMPHEQ: return "HexagonISD::VCMPHEQ";
- case HexagonISD::VCMPHGT: return "HexagonISD::VCMPHGT";
- case HexagonISD::VCMPHGTU: return "HexagonISD::VCMPHGTU";
- case HexagonISD::VCMPWEQ: return "HexagonISD::VCMPWEQ";
- case HexagonISD::VCMPWGT: return "HexagonISD::VCMPWGT";
- case HexagonISD::VCMPWGTU: return "HexagonISD::VCMPWGTU";
case HexagonISD::VCOMBINE: return "HexagonISD::VCOMBINE";
case HexagonISD::VPACK: return "HexagonISD::VPACK";
- case HexagonISD::VSHLH: return "HexagonISD::VSHLH";
- case HexagonISD::VSHLW: return "HexagonISD::VSHLW";
- case HexagonISD::VSPLATB: return "HexagonISD::VSPLTB";
- case HexagonISD::VSPLATH: return "HexagonISD::VSPLATH";
- case HexagonISD::VSRAH: return "HexagonISD::VSRAH";
- case HexagonISD::VSRAW: return "HexagonISD::VSRAW";
- case HexagonISD::VSRLH: return "HexagonISD::VSRLH";
- case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
- case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
- case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
+ case HexagonISD::VASL: return "HexagonISD::VASL";
+ case HexagonISD::VASR: return "HexagonISD::VASR";
+ case HexagonISD::VLSR: return "HexagonISD::VLSR";
+ case HexagonISD::VSPLAT: return "HexagonISD::VSPLAT";
case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
case HexagonISD::OP_END: break;
}
@@ -2503,13 +2487,13 @@ HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
if (VT.getSimpleVT() == MVT::v4i16) {
switch (Op.getOpcode()) {
case ISD::SRA:
- Result = DAG.getNode(HexagonISD::VSRAH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASR, dl, VT, V3, CommonSplat);
break;
case ISD::SHL:
- Result = DAG.getNode(HexagonISD::VSHLH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASL, dl, VT, V3, CommonSplat);
break;
case ISD::SRL:
- Result = DAG.getNode(HexagonISD::VSRLH, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VLSR, dl, VT, V3, CommonSplat);
break;
default:
return SDValue();
@@ -2517,13 +2501,13 @@ HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
} else if (VT.getSimpleVT() == MVT::v2i32) {
switch (Op.getOpcode()) {
case ISD::SRA:
- Result = DAG.getNode(HexagonISD::VSRAW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASR, dl, VT, V3, CommonSplat);
break;
case ISD::SHL:
- Result = DAG.getNode(HexagonISD::VSHLW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VASL, dl, VT, V3, CommonSplat);
break;
case ISD::SRL:
- Result = DAG.getNode(HexagonISD::VSRLW, dl, VT, V3, CommonSplat);
+ Result = DAG.getNode(HexagonISD::VLSR, dl, VT, V3, CommonSplat);
break;
default:
return SDValue();
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index 1415156487c0..bfd2c94eeaba 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -52,29 +52,10 @@ namespace HexagonISD {
COMBINE,
PACKHL,
- VSPLATB,
- VSPLATH,
- SHUFFEB,
- SHUFFEH,
- SHUFFOB,
- SHUFFOH,
- VSXTBH,
- VSXTBW,
- VSRAW,
- VSRAH,
- VSRLW,
- VSRLH,
- VSHLW,
- VSHLH,
- VCMPBEQ,
- VCMPBGT,
- VCMPBGTU,
- VCMPHEQ,
- VCMPHGT,
- VCMPHGTU,
- VCMPWEQ,
- VCMPWGT,
- VCMPWGTU,
+ VSPLAT,
+ VASL,
+ VASR,
+ VLSR,
INSERT,
INSERTRP,
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 1eac2d3dd8e2..c77c669f4ca7 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -250,15 +250,19 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
case Hexagon::L2_loadri_io:
case Hexagon::L2_loadrd_io:
case Hexagon::V6_vL32b_ai:
+ case Hexagon::V6_vL32b_nt_ai:
case Hexagon::V6_vL32b_ai_128B:
+ case Hexagon::V6_vL32b_nt_ai_128B:
case Hexagon::V6_vL32Ub_ai:
case Hexagon::V6_vL32Ub_ai_128B:
case Hexagon::LDriw_pred:
case Hexagon::LDriw_mod:
case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
+ case Hexagon::PS_vloadrw_nt_ai:
case Hexagon::PS_vloadrq_ai_128B:
- case Hexagon::PS_vloadrw_ai_128B: {
+ case Hexagon::PS_vloadrw_ai_128B:
+ case Hexagon::PS_vloadrw_nt_ai_128B: {
const MachineOperand OpFI = MI.getOperand(1);
if (!OpFI.isFI())
return 0;
@@ -1726,6 +1730,39 @@ bool HexagonInstrInfo::getIncrementValue(const MachineInstr &MI,
return false;
}
+std::pair<unsigned, unsigned>
+HexagonInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+ return std::make_pair(TF & ~HexagonII::MO_Bitmasks,
+ TF & HexagonII::MO_Bitmasks);
+}
+
+ArrayRef<std::pair<unsigned, const char*>>
+HexagonInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+ using namespace HexagonII;
+ static const std::pair<unsigned, const char*> Flags[] = {
+ {MO_PCREL, "hexagon-pcrel"},
+ {MO_GOT, "hexagon-got"},
+ {MO_LO16, "hexagon-lo16"},
+ {MO_HI16, "hexagon-hi16"},
+ {MO_GPREL, "hexagon-gprel"},
+ {MO_GDGOT, "hexagon-gdgot"},
+ {MO_GDPLT, "hexagon-gdplt"},
+ {MO_IE, "hexagon-ie"},
+ {MO_IEGOT, "hexagon-iegot"},
+ {MO_TPREL, "hexagon-tprel"}
+ };
+ return makeArrayRef(Flags);
+}
+
+ArrayRef<std::pair<unsigned, const char*>>
+HexagonInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
+ using namespace HexagonII;
+ static const std::pair<unsigned, const char*> Flags[] = {
+ {HMOTF_ConstExtended, "hexagon-ext"}
+ };
+ return makeArrayRef(Flags);
+}
+
unsigned HexagonInstrInfo::createVR(MachineFunction *MF, MVT VT) const {
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetRegisterClass *TRC;
@@ -1797,7 +1834,7 @@ bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
const MachineOperand &MO = MI.getOperand(ExtOpNum);
// Use MO operand flags to determine if MO
// has the HMOTF_ConstExtended flag set.
- if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
+ if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
return true;
// If this is a Machine BB address we are talking about, and it is
// not marked as extended, say so.
@@ -1807,9 +1844,6 @@ bool HexagonInstrInfo::isConstExtended(const MachineInstr &MI) const {
// We could be using an instruction with an extendable immediate and shoehorn
// a global address into it. If it is a global address it will be constant
// extended. We do this for COMBINE.
- // We currently only handle isGlobal() because it is the only kind of
- // object we are going to end up with here for now.
- // In the future we probably should add isSymbol(), etc.
if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
MO.isJTI() || MO.isCPI() || MO.isFPImm())
return true;
@@ -1961,11 +1995,9 @@ bool HexagonInstrInfo::isExtended(const MachineInstr &MI) const {
return true;
// Use MO operand flags to determine if one of MI's operands
// has HMOTF_ConstExtended flag set.
- for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
- E = MI.operands_end(); I != E; ++I) {
- if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
+ for (const MachineOperand &MO : MI.operands())
+ if (MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended)
return true;
- }
return false;
}
@@ -2445,20 +2477,28 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
switch (Opcode) {
case Hexagon::PS_vstorerq_ai:
case Hexagon::PS_vstorerw_ai:
+ case Hexagon::PS_vstorerw_nt_ai:
case Hexagon::PS_vloadrq_ai:
case Hexagon::PS_vloadrw_ai:
+ case Hexagon::PS_vloadrw_nt_ai:
case Hexagon::V6_vL32b_ai:
case Hexagon::V6_vS32b_ai:
+ case Hexagon::V6_vL32b_nt_ai:
+ case Hexagon::V6_vS32b_nt_ai:
case Hexagon::V6_vL32Ub_ai:
case Hexagon::V6_vS32Ub_ai:
return isShiftedInt<4,6>(Offset);
case Hexagon::PS_vstorerq_ai_128B:
case Hexagon::PS_vstorerw_ai_128B:
+ case Hexagon::PS_vstorerw_nt_ai_128B:
case Hexagon::PS_vloadrq_ai_128B:
case Hexagon::PS_vloadrw_ai_128B:
+ case Hexagon::PS_vloadrw_nt_ai_128B:
case Hexagon::V6_vL32b_ai_128B:
case Hexagon::V6_vS32b_ai_128B:
+ case Hexagon::V6_vL32b_nt_ai_128B:
+ case Hexagon::V6_vS32b_nt_ai_128B:
case Hexagon::V6_vL32Ub_ai_128B:
case Hexagon::V6_vS32Ub_ai_128B:
return isShiftedInt<4,7>(Offset);
@@ -3170,11 +3210,19 @@ int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_cur_pi;
case Hexagon::V6_vL32b_ai:
return Hexagon::V6_vL32b_cur_ai;
+ case Hexagon::V6_vL32b_nt_pi:
+ return Hexagon::V6_vL32b_nt_cur_pi;
+ case Hexagon::V6_vL32b_nt_ai:
+ return Hexagon::V6_vL32b_nt_cur_ai;
//128B
case Hexagon::V6_vL32b_pi_128B:
return Hexagon::V6_vL32b_cur_pi_128B;
case Hexagon::V6_vL32b_ai_128B:
return Hexagon::V6_vL32b_cur_ai_128B;
+ case Hexagon::V6_vL32b_nt_pi_128B:
+ return Hexagon::V6_vL32b_nt_cur_pi_128B;
+ case Hexagon::V6_vL32b_nt_ai_128B:
+ return Hexagon::V6_vL32b_nt_cur_ai_128B;
}
return 0;
}
@@ -3187,11 +3235,19 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
return Hexagon::V6_vL32b_pi;
case Hexagon::V6_vL32b_cur_ai:
return Hexagon::V6_vL32b_ai;
+ case Hexagon::V6_vL32b_nt_cur_pi:
+ return Hexagon::V6_vL32b_nt_pi;
+ case Hexagon::V6_vL32b_nt_cur_ai:
+ return Hexagon::V6_vL32b_nt_ai;
//128B
case Hexagon::V6_vL32b_cur_pi_128B:
return Hexagon::V6_vL32b_pi_128B;
case Hexagon::V6_vL32b_cur_ai_128B:
return Hexagon::V6_vL32b_ai_128B;
+ case Hexagon::V6_vL32b_nt_cur_pi_128B:
+ return Hexagon::V6_vL32b_nt_pi_128B;
+ case Hexagon::V6_vL32b_nt_cur_ai_128B:
+ return Hexagon::V6_vL32b_nt_ai_128B;
}
return 0;
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 944d0161a7c8..0436ce3ac475 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -301,6 +301,27 @@ public:
const MachineInstr &UseMI,
unsigned UseIdx) const override;
+ /// Decompose the machine operand's target flags into two values - the direct
+ /// target flag value and any of bit flags that are applied.
+ std::pair<unsigned, unsigned>
+ decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+ /// Return an array that contains the direct target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableDirectMachineOperandTargetFlags() const override;
+
+ /// Return an array that contains the bitmask target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableBitmaskMachineOperandTargetFlags() const override;
+
bool isTailCall(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 4602de979024..1a26805d190d 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -49,7 +49,7 @@ static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
using namespace llvm;
-#define DEBUG_TYPE "misched"
+#define DEBUG_TYPE "machine-scheduler"
namespace {
class HexagonCallMutation : public ScheduleDAGMutation {
diff --git a/lib/Target/Hexagon/HexagonPatterns.td b/lib/Target/Hexagon/HexagonPatterns.td
index 689419638f54..ba98b8994937 100644
--- a/lib/Target/Hexagon/HexagonPatterns.td
+++ b/lib/Target/Hexagon/HexagonPatterns.td
@@ -2770,6 +2770,9 @@ def unalignedstore : PatFrag<(ops node:$val, node:$addr), (store $val, $addr), [
multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned stores
+ def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
+ (V6_vS32b_nt_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32b_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
@@ -2778,6 +2781,9 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// 128B Aligned stores
+ def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
+ (V6_vS32b_nt_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32b_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
@@ -2787,6 +2793,11 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Fold Add R+OFF into vector store.
let AddedComplexity = 10 in {
+ def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1),
+ (add IntRegs:$src2, Iss4_6:$offset)),
+ (V6_vS32b_nt_ai IntRegs:$src2, Iss4_6:$offset,
+ (VTSgl VectorRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1),
(add IntRegs:$src2, Iss4_6:$offset)),
(V6_vS32b_ai IntRegs:$src2, Iss4_6:$offset,
@@ -2799,6 +2810,11 @@ multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// Fold Add R+OFF into vector store 128B.
+ def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1),
+ (add IntRegs:$src2, Iss4_7:$offset)),
+ (V6_vS32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset,
+ (VTDbl VectorRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1),
(add IntRegs:$src2, Iss4_7:$offset)),
(V6_vS32b_ai_128B IntRegs:$src2, Iss4_7:$offset,
@@ -2820,6 +2836,9 @@ defm : vS32b_ai_pats <v8i64, v16i64>;
multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned loads
+ def : Pat < (VTSgl (alignednontemporalload IntRegs:$addr)),
+ (V6_vL32b_nt_ai IntRegs:$addr, 0) >,
+ Requires<[UseHVXSgl]>;
def : Pat < (VTSgl (alignedload IntRegs:$addr)),
(V6_vL32b_ai IntRegs:$addr, 0) >,
Requires<[UseHVXSgl]>;
@@ -2828,6 +2847,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
Requires<[UseHVXSgl]>;
// 128B Load
+ def : Pat < (VTDbl (alignednontemporalload IntRegs:$addr)),
+ (V6_vL32b_nt_ai_128B IntRegs:$addr, 0) >,
+ Requires<[UseHVXDbl]>;
def : Pat < (VTDbl (alignedload IntRegs:$addr)),
(V6_vL32b_ai_128B IntRegs:$addr, 0) >,
Requires<[UseHVXDbl]>;
@@ -2837,6 +2859,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Fold Add R+OFF into vector load.
let AddedComplexity = 10 in {
+ def : Pat<(VTDbl (alignednontemporalload (add IntRegs:$src2, Iss4_7:$offset))),
+ (V6_vL32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
+ Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload (add IntRegs:$src2, Iss4_7:$offset))),
(V6_vL32b_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
@@ -2844,6 +2869,9 @@ multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
(V6_vL32Ub_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
+ def : Pat<(VTSgl (alignednontemporalload (add IntRegs:$src2, Iss4_6:$offset))),
+ (V6_vL32b_nt_ai IntRegs:$src2, Iss4_6:$offset)>,
+ Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignedload (add IntRegs:$src2, Iss4_6:$offset))),
(V6_vL32b_ai IntRegs:$src2, Iss4_6:$offset)>,
Requires<[UseHVXSgl]>;
@@ -2859,6 +2887,9 @@ defm : vL32b_ai_pats <v16i32, v32i32>;
defm : vL32b_ai_pats <v8i64, v16i64>;
multiclass STrivv_pats <ValueType VTSgl, ValueType VTDbl> {
+ def : Pat<(alignednontemporalstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
+ (PS_vstorerw_nt_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
+ Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
(PS_vstorerw_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
@@ -2866,6 +2897,10 @@ multiclass STrivv_pats <ValueType VTSgl, ValueType VTDbl> {
(PS_vstorerwu_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
+ def : Pat<(alignednontemporalstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
+ (PS_vstorerw_nt_ai_128B IntRegs:$addr, 0,
+ (VTDbl VecDblRegs128B:$src1))>,
+ Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
(PS_vstorerw_ai_128B IntRegs:$addr, 0,
(VTDbl VecDblRegs128B:$src1))>,
@@ -2882,6 +2917,9 @@ defm : STrivv_pats <v32i32, v64i32>;
defm : STrivv_pats <v16i64, v32i64>;
multiclass LDrivv_pats <ValueType VTSgl, ValueType VTDbl> {
+ def : Pat<(VTSgl (alignednontemporalload I32:$addr)),
+ (PS_vloadrw_nt_ai I32:$addr, 0)>,
+ Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignedload I32:$addr)),
(PS_vloadrw_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
@@ -2889,6 +2927,9 @@ multiclass LDrivv_pats <ValueType VTSgl, ValueType VTDbl> {
(PS_vloadrwu_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
+ def : Pat<(VTDbl (alignednontemporalload I32:$addr)),
+ (PS_vloadrw_nt_ai_128B I32:$addr, 0)>,
+ Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload I32:$addr)),
(PS_vloadrw_ai_128B I32:$addr, 0)>,
Requires<[UseHVXDbl]>;
@@ -3021,16 +3062,16 @@ def : Pat<(v2i16 (add (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
def : Pat<(v2i16 (sub (v2i16 IntRegs:$src1), (v2i16 IntRegs:$src2))),
(A2_svsubh IntRegs:$src1, IntRegs:$src2)>;
-def HexagonVSPLATB: SDNode<"HexagonISD::VSPLATB", SDTUnaryOp>;
-def HexagonVSPLATH: SDNode<"HexagonISD::VSPLATH", SDTUnaryOp>;
+def SDTHexagonVSPLAT: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+def HexagonVSPLAT: SDNode<"HexagonISD::VSPLAT", SDTHexagonVSPLAT>;
// Replicate the low 8-bits from 32-bits input register into each of the
// four bytes of 32-bits destination register.
-def: Pat<(v4i8 (HexagonVSPLATB I32:$Rs)), (S2_vsplatrb I32:$Rs)>;
+def: Pat<(v4i8 (HexagonVSPLAT I32:$Rs)), (S2_vsplatrb I32:$Rs)>;
// Replicate the low 16-bits from 32-bits input register into each of the
// four halfwords of 64-bits destination register.
-def: Pat<(v4i16 (HexagonVSPLATH I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
+def: Pat<(v4i16 (HexagonVSPLAT I32:$Rs)), (S2_vsplatrh I32:$Rs)>;
class VArith_pat <InstHexagon MI, SDNode Op, PatFrag Type>
@@ -3068,84 +3109,44 @@ def: Pat<(v2i32 (shl V2I32:$b, (i64 (HexagonCOMBINE (i32 u5_0ImmPred:$c),
(i32 u5_0ImmPred:$c))))),
(S2_asl_i_vw V2I32:$b, imm:$c)>;
-def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (sra V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_asr_i_vh V4I16:$b, imm:$c)>;
-def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (srl V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_lsr_i_vh V4I16:$b, imm:$c)>;
-def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLATH (i32 (u4_0ImmPred:$c)))))),
+def: Pat<(v4i16 (shl V4I16:$b, (v4i16 (HexagonVSPLAT u4_0ImmPred:$c)))),
(S2_asl_i_vh V4I16:$b, imm:$c)>;
-def SDTHexagon_v2i32_v2i32_i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisVT<0, v2i32>, SDTCisInt<2>]>;
-def SDTHexagon_v4i16_v4i16_i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisVT<0, v4i16>, SDTCisInt<2>]>;
+def SDTHexagonVShift
+ : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVec<0>, SDTCisVT<2, i32>]>;
-def HexagonVSRAW: SDNode<"HexagonISD::VSRAW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSRAH: SDNode<"HexagonISD::VSRAH", SDTHexagon_v4i16_v4i16_i32>;
-def HexagonVSRLW: SDNode<"HexagonISD::VSRLW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSRLH: SDNode<"HexagonISD::VSRLH", SDTHexagon_v4i16_v4i16_i32>;
-def HexagonVSHLW: SDNode<"HexagonISD::VSHLW", SDTHexagon_v2i32_v2i32_i32>;
-def HexagonVSHLH: SDNode<"HexagonISD::VSHLH", SDTHexagon_v4i16_v4i16_i32>;
+def HexagonVASL: SDNode<"HexagonISD::VASL", SDTHexagonVShift>;
+def HexagonVASR: SDNode<"HexagonISD::VASR", SDTHexagonVShift>;
+def HexagonVLSR: SDNode<"HexagonISD::VLSR", SDTHexagonVShift>;
-def: Pat<(v2i32 (HexagonVSRAW V2I32:$Rs, u5_0ImmPred:$u5)),
+def: Pat<(v2i32 (HexagonVASL V2I32:$Rs, u5_0ImmPred:$u5)),
+ (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
+def: Pat<(v4i16 (HexagonVASL V4I16:$Rs, u4_0ImmPred:$u4)),
+ (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;
+def: Pat<(v2i32 (HexagonVASR V2I32:$Rs, u5_0ImmPred:$u5)),
(S2_asr_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSRAH V4I16:$Rs, u4_0ImmPred:$u4)),
+def: Pat<(v4i16 (HexagonVASR V4I16:$Rs, u4_0ImmPred:$u4)),
(S2_asr_i_vh V4I16:$Rs, imm:$u4)>;
-def: Pat<(v2i32 (HexagonVSRLW V2I32:$Rs, u5_0ImmPred:$u5)),
+def: Pat<(v2i32 (HexagonVLSR V2I32:$Rs, u5_0ImmPred:$u5)),
(S2_lsr_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSRLH V4I16:$Rs, u4_0ImmPred:$u4)),
+def: Pat<(v4i16 (HexagonVLSR V4I16:$Rs, u4_0ImmPred:$u4)),
(S2_lsr_i_vh V4I16:$Rs, imm:$u4)>;
-def: Pat<(v2i32 (HexagonVSHLW V2I32:$Rs, u5_0ImmPred:$u5)),
- (S2_asl_i_vw V2I32:$Rs, imm:$u5)>;
-def: Pat<(v4i16 (HexagonVSHLH V4I16:$Rs, u4_0ImmPred:$u4)),
- (S2_asl_i_vh V4I16:$Rs, imm:$u4)>;
class vshift_rr_pat<InstHexagon MI, SDNode Op, PatFrag Value>
: Pat <(Op Value:$Rs, I32:$Rt),
(MI Value:$Rs, I32:$Rt)>;
-def: vshift_rr_pat <S2_asr_r_vw, HexagonVSRAW, V2I32>;
-def: vshift_rr_pat <S2_asr_r_vh, HexagonVSRAH, V4I16>;
-def: vshift_rr_pat <S2_lsr_r_vw, HexagonVSRLW, V2I32>;
-def: vshift_rr_pat <S2_lsr_r_vh, HexagonVSRLH, V4I16>;
-def: vshift_rr_pat <S2_asl_r_vw, HexagonVSHLW, V2I32>;
-def: vshift_rr_pat <S2_asl_r_vh, HexagonVSHLH, V4I16>;
-
-
-def SDTHexagonVecCompare_v8i8 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v8i8>]>;
-def SDTHexagonVecCompare_v4i16 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v4i16>]>;
-def SDTHexagonVecCompare_v2i32 : SDTypeProfile<1, 2,
- [SDTCisSameAs<1, 2>, SDTCisVT<0, i1>, SDTCisVT<1, v2i32>]>;
-
-def HexagonVCMPBEQ: SDNode<"HexagonISD::VCMPBEQ", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPBGT: SDNode<"HexagonISD::VCMPBGT", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPBGTU: SDNode<"HexagonISD::VCMPBGTU", SDTHexagonVecCompare_v8i8>;
-def HexagonVCMPHEQ: SDNode<"HexagonISD::VCMPHEQ", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPHGT: SDNode<"HexagonISD::VCMPHGT", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPHGTU: SDNode<"HexagonISD::VCMPHGTU", SDTHexagonVecCompare_v4i16>;
-def HexagonVCMPWEQ: SDNode<"HexagonISD::VCMPWEQ", SDTHexagonVecCompare_v2i32>;
-def HexagonVCMPWGT: SDNode<"HexagonISD::VCMPWGT", SDTHexagonVecCompare_v2i32>;
-def HexagonVCMPWGTU: SDNode<"HexagonISD::VCMPWGTU", SDTHexagonVecCompare_v2i32>;
-
-
-class vcmp_i1_pat<InstHexagon MI, SDNode Op, PatFrag Value>
- : Pat <(i1 (Op Value:$Rs, Value:$Rt)),
- (MI Value:$Rs, Value:$Rt)>;
-
-def: vcmp_i1_pat<A2_vcmpbeq, HexagonVCMPBEQ, V8I8>;
-def: vcmp_i1_pat<A4_vcmpbgt, HexagonVCMPBGT, V8I8>;
-def: vcmp_i1_pat<A2_vcmpbgtu, HexagonVCMPBGTU, V8I8>;
-
-def: vcmp_i1_pat<A2_vcmpheq, HexagonVCMPHEQ, V4I16>;
-def: vcmp_i1_pat<A2_vcmphgt, HexagonVCMPHGT, V4I16>;
-def: vcmp_i1_pat<A2_vcmphgtu, HexagonVCMPHGTU, V4I16>;
-
-def: vcmp_i1_pat<A2_vcmpweq, HexagonVCMPWEQ, V2I32>;
-def: vcmp_i1_pat<A2_vcmpwgt, HexagonVCMPWGT, V2I32>;
-def: vcmp_i1_pat<A2_vcmpwgtu, HexagonVCMPWGTU, V2I32>;
+def: vshift_rr_pat <S2_asl_r_vw, HexagonVASL, V2I32>;
+def: vshift_rr_pat <S2_asl_r_vh, HexagonVASL, V4I16>;
+def: vshift_rr_pat <S2_asr_r_vw, HexagonVASR, V2I32>;
+def: vshift_rr_pat <S2_asr_r_vh, HexagonVASR, V4I16>;
+def: vshift_rr_pat <S2_lsr_r_vw, HexagonVLSR, V2I32>;
+def: vshift_rr_pat <S2_lsr_r_vh, HexagonVLSR, V4I16>;
class vcmp_vi1_pat<InstHexagon MI, PatFrag Op, PatFrag InVal, ValueType OutTy>
@@ -3255,13 +3256,6 @@ def: Pat<(v4i8 (trunc V4I16:$Rs)),
def: Pat<(v2i16 (trunc V2I32:$Rs)),
(LoReg (S2_packhl (HiReg $Rs), (LoReg $Rs)))>;
-
-def HexagonVSXTBH : SDNode<"HexagonISD::VSXTBH", SDTUnaryOp>;
-def HexagonVSXTBW : SDNode<"HexagonISD::VSXTBW", SDTUnaryOp>;
-
-def: Pat<(i64 (HexagonVSXTBH I32:$Rs)), (S2_vsxtbh I32:$Rs)>;
-def: Pat<(i64 (HexagonVSXTBW I32:$Rs)), (S2_vsxthw I32:$Rs)>;
-
def: Pat<(v4i16 (zext V4I8:$Rs)), (S2_vzxtbh V4I8:$Rs)>;
def: Pat<(v2i32 (zext V2I16:$Rs)), (S2_vzxthw V2I16:$Rs)>;
def: Pat<(v4i16 (anyext V4I8:$Rs)), (S2_vzxtbh V4I8:$Rs)>;
@@ -3322,31 +3316,6 @@ def: Pat<(v8i8 (mul V8I8:$Rs, V8I8:$Rt)),
(A2_combinew (S2_vtrunehb (VMPYB_no_V5 (HiReg $Rs), (HiReg $Rt))),
(S2_vtrunehb (VMPYB_no_V5 (LoReg $Rs), (LoReg $Rt))))>;
-def SDTHexagonBinOp64 : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>]>;
-
-def HexagonSHUFFEB: SDNode<"HexagonISD::SHUFFEB", SDTHexagonBinOp64>;
-def HexagonSHUFFEH: SDNode<"HexagonISD::SHUFFEH", SDTHexagonBinOp64>;
-def HexagonSHUFFOB: SDNode<"HexagonISD::SHUFFOB", SDTHexagonBinOp64>;
-def HexagonSHUFFOH: SDNode<"HexagonISD::SHUFFOH", SDTHexagonBinOp64>;
-
-class ShufflePat<InstHexagon MI, SDNode Op>
- : Pat<(i64 (Op DoubleRegs:$src1, DoubleRegs:$src2)),
- (i64 (MI DoubleRegs:$src1, DoubleRegs:$src2))>;
-
-// Shuffles even bytes for i=0..3: A[2*i].b = C[2*i].b; A[2*i+1].b = B[2*i].b
-def: ShufflePat<S2_shuffeb, HexagonSHUFFEB>;
-
-// Shuffles odd bytes for i=0..3: A[2*i].b = C[2*i+1].b; A[2*i+1].b = B[2*i+1].b
-def: ShufflePat<S2_shuffob, HexagonSHUFFOB>;
-
-// Shuffles even half for i=0,1: A[2*i].h = C[2*i].h; A[2*i+1].h = B[2*i].h
-def: ShufflePat<S2_shuffeh, HexagonSHUFFEH>;
-
-// Shuffles odd half for i=0,1: A[2*i].h = C[2*i+1].h; A[2*i+1].h = B[2*i+1].h
-def: ShufflePat<S2_shuffoh, HexagonSHUFFOH>;
-
-
// Truncated store from v4i16 to v4i8.
def truncstorev4i8: PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr),
diff --git a/lib/Target/Hexagon/HexagonPseudo.td b/lib/Target/Hexagon/HexagonPseudo.td
index 93fb688fc1c0..b42c1ab975a8 100644
--- a/lib/Target/Hexagon/HexagonPseudo.td
+++ b/lib/Target/Hexagon/HexagonPseudo.td
@@ -407,6 +407,11 @@ def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>,
def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vstorerw_nt_ai: STrivv_template<VecDblRegs, V6_vS32b_nt_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vstorerw_nt_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_nt_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
+
def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>,
Requires<[HasV60T,UseHVXSgl]>;
def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>,
@@ -433,6 +438,11 @@ def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>,
def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vloadrw_nt_ai: LDrivv_template<VecDblRegs, V6_vL32b_nt_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vloadrw_nt_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_nt_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
+
def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>,
Requires<[HasV60T,UseHVXSgl]>;
def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>,
diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp
index db268b78cd73..4fa929a20810 100644
--- a/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -350,6 +350,8 @@ int32_t HexagonSplitDoubleRegs::profit(const MachineInstr *MI) const {
MI->getOperand(2).getImm());
case Hexagon::A4_combineri:
ImmX++;
+ // Fall through into A4_combineir.
+ LLVM_FALLTHROUGH;
case Hexagon::A4_combineir: {
ImmX++;
int64_t V = MI->getOperand(ImmX).getImm();
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 76d9b31b005f..7d88b51f32dd 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -110,10 +110,11 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
namespace llvm {
extern char &HexagonExpandCondsetsID;
void initializeHexagonExpandCondsetsPass(PassRegistry&);
- void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
void initializeHexagonGenMuxPass(PassRegistry&);
- void initializeHexagonOptAddrModePass(PassRegistry&);
+ void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
void initializeHexagonNewValueJumpPass(PassRegistry&);
+ void initializeHexagonOptAddrModePass(PassRegistry&);
+ void initializeHexagonPacketizerPass(PassRegistry&);
Pass *createHexagonLoopIdiomPass();
FunctionPass *createHexagonBitSimplify();
@@ -156,10 +157,11 @@ extern "C" void LLVMInitializeHexagonTarget() {
RegisterTargetMachine<HexagonTargetMachine> X(getTheHexagonTarget());
PassRegistry &PR = *PassRegistry::getPassRegistry();
- initializeHexagonLoopIdiomRecognizePass(PR);
initializeHexagonGenMuxPass(PR);
- initializeHexagonOptAddrModePass(PR);
+ initializeHexagonLoopIdiomRecognizePass(PR);
initializeHexagonNewValueJumpPass(PR);
+ initializeHexagonOptAddrModePass(PR);
+ initializeHexagonPacketizerPass(PR);
}
HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 7667bfb7a0eb..a3021e3dfe43 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -60,9 +60,7 @@ namespace {
class HexagonPacketizer : public MachineFunctionPass {
public:
static char ID;
- HexagonPacketizer() : MachineFunctionPass(ID) {
- initializeHexagonPacketizerPass(*PassRegistry::getPassRegistry());
- }
+ HexagonPacketizer() : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
@@ -89,14 +87,14 @@ namespace {
char HexagonPacketizer::ID = 0;
}
-INITIALIZE_PASS_BEGIN(HexagonPacketizer, "packets", "Hexagon Packetizer",
- false, false)
+INITIALIZE_PASS_BEGIN(HexagonPacketizer, "hexagon-packetizer",
+ "Hexagon Packetizer", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_END(HexagonPacketizer, "packets", "Hexagon Packetizer",
- false, false)
+INITIALIZE_PASS_END(HexagonPacketizer, "hexagon-packetizer",
+ "Hexagon Packetizer", false, false)
HexagonPacketizerList::HexagonPacketizerList(MachineFunction &MF,
MachineLoopInfo &MLI, AliasAnalysis *AA,
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 34d0b55aa22a..2a0edda8dcee 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -412,7 +412,7 @@ public:
/// fixup kind as appropriate.
void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t FixupValue, bool IsPCRel) const override {
+ uint64_t FixupValue, bool IsResolved) const override {
// When FixupValue is 0 the relocation is external and there
// is nothing for us to do.
@@ -442,6 +442,7 @@ public:
case fixup_Hexagon_B7_PCREL:
if (!(isIntN(7, sValue)))
HandleFixupError(7, 2, (int64_t)FixupValue, "B7_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B7_PCREL_X:
InstMask = 0x00001f18; // Word32_B7
Reloc = (((Value >> 2) & 0x1f) << 8) | // Value 6-2 = Target 12-8
@@ -451,6 +452,7 @@ public:
case fixup_Hexagon_B9_PCREL:
if (!(isIntN(9, sValue)))
HandleFixupError(9, 2, (int64_t)FixupValue, "B9_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B9_PCREL_X:
InstMask = 0x003000fe; // Word32_B9
Reloc = (((Value >> 7) & 0x3) << 20) | // Value 8-7 = Target 21-20
@@ -462,6 +464,7 @@ public:
case fixup_Hexagon_B13_PCREL:
if (!(isIntN(13, sValue)))
HandleFixupError(13, 2, (int64_t)FixupValue, "B13_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B13_PCREL_X:
InstMask = 0x00202ffe; // Word32_B13
Reloc = (((Value >> 12) & 0x1) << 21) | // Value 12 = Target 21
@@ -472,6 +475,7 @@ public:
case fixup_Hexagon_B15_PCREL:
if (!(isIntN(15, sValue)))
HandleFixupError(15, 2, (int64_t)FixupValue, "B15_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B15_PCREL_X:
InstMask = 0x00df20fe; // Word32_B15
Reloc = (((Value >> 13) & 0x3) << 22) | // Value 14-13 = Target 23-22
@@ -483,6 +487,7 @@ public:
case fixup_Hexagon_B22_PCREL:
if (!(isIntN(22, sValue)))
HandleFixupError(22, 2, (int64_t)FixupValue, "B22_PCREL");
+ LLVM_FALLTHROUGH;
case fixup_Hexagon_B22_PCREL_X:
InstMask = 0x01ff3ffe; // Word32_B22
Reloc = (((Value >> 13) & 0x1ff) << 16) | // Value 21-13 = Target 24-16
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index d8009c5da08e..7f90e83fc8e9 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -169,8 +169,11 @@ namespace HexagonII {
// Hexagon specific MO operand flag mask.
enum HexagonMOTargetFlagVal {
- //===------------------------------------------------------------------===//
- // Hexagon Specific MachineOperand flags.
+ // Hexagon-specific MachineOperand target flags.
+ //
+ // When changing these, make sure to update
+ // getSerializableDirectMachineOperandTargetFlags and
+ // getSerializableBitmaskMachineOperandTargetFlags if needed.
MO_NO_FLAG,
/// MO_PCREL - On a symbol operand, indicates a PC-relative relocation
@@ -207,10 +210,12 @@ namespace HexagonII {
MO_TPREL,
// HMOTF_ConstExtended
- // Addendum to abovem, indicates a const extended op
+ // Addendum to above, indicates a const extended op
// Can be used as a mask.
- HMOTF_ConstExtended = 0x80
+ HMOTF_ConstExtended = 0x80,
+ // Union of all bitmasks (currently only HMOTF_ConstExtended).
+ MO_Bitmasks = HMOTF_ConstExtended
};
// Hexagon Sub-instruction classes.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index 564d43b45cb8..1604e7c8dc54 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -259,6 +259,7 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeCVI_VM_VP_LDU:
++onlyNo1;
+ LLVM_FALLTHROUGH;
case HexagonII::TypeCVI_VM_LD:
case HexagonII::TypeCVI_VM_TMP_LD:
case HexagonII::TypeLD:
@@ -274,6 +275,7 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeCVI_VM_STU:
++onlyNo1;
+ LLVM_FALLTHROUGH;
case HexagonII::TypeCVI_VM_ST:
case HexagonII::TypeCVI_VM_NEW_ST:
case HexagonII::TypeST: