diff options
Diffstat (limited to 'llvm/lib/Target/RISCV/RISCVInstrInfoM.td')
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVInstrInfoM.td | 32 |
1 file changed, 24 insertions, 8 deletions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td index 8cfb903a173c..f654ed1949a4 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td @@ -15,9 +15,10 @@ // RISC-V specific DAG Nodes. //===----------------------------------------------------------------------===// -def riscv_divw : SDNode<"RISCVISD::DIVW", SDTIntBinOp>; -def riscv_divuw : SDNode<"RISCVISD::DIVUW", SDTIntBinOp>; -def riscv_remuw : SDNode<"RISCVISD::REMUW", SDTIntBinOp>; +def riscv_mulhsu : SDNode<"RISCVISD::MULHSU", SDTIntBinOp>; +def riscv_divw : SDNode<"RISCVISD::DIVW", SDT_RISCVIntBinOpW>; +def riscv_divuw : SDNode<"RISCVISD::DIVUW", SDT_RISCVIntBinOpW>; +def riscv_remuw : SDNode<"RISCVISD::REMUW", SDT_RISCVIntBinOpW>; //===----------------------------------------------------------------------===// // Instructions @@ -63,7 +64,7 @@ let Predicates = [HasStdExtM] in { def : PatGprGpr<mul, MUL>; def : PatGprGpr<mulhs, MULH>; def : PatGprGpr<mulhu, MULHU>; -// No ISDOpcode for mulhsu +def : PatGprGpr<riscv_mulhsu, MULHSU>; def : PatGprGpr<sdiv, DIV>; def : PatGprGpr<udiv, DIVU>; def : PatGprGpr<srem, REM>; @@ -91,9 +92,24 @@ def : Pat<(and (riscv_remuw (assertzexti32 GPR:$rs1), // Although the sexti32 operands may not have originated from an i32 srem, // this pattern is safe as it is impossible for two sign extended inputs to // produce a result where res[63:32]=0 and res[31]=1. -def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)), - (REMW GPR:$rs1, GPR:$rs2)>; -def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1), - (sexti32 GPR:$rs2)), i32), +def : Pat<(srem (sexti32 (i64 GPR:$rs1)), (sexti32 (i64 GPR:$rs2))), (REMW GPR:$rs1, GPR:$rs2)>; } // Predicates = [HasStdExtM, IsRV64] + +let Predicates = [HasStdExtM, IsRV64, NotHasStdExtZba] in { +// Special case for calculating the full 64-bit product of a 32x32 unsigned +// multiply where the inputs aren't known to be zero extended. 
We can shift the +// inputs left by 32 and use a MULHU. This saves two SRLIs needed to finish +// zeroing the upper 32 bits. +// TODO: If one of the operands is zero extended and the other isn't, we might +// still be better off shifting both left by 32. +def : Pat<(i64 (mul (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff))), +          (MULHU (SLLI GPR:$rs1, 32), (SLLI GPR:$rs2, 32))>; +// Prevent matching the first part of this pattern to mulw. The mul here has +// additional users or the ANDs would have been removed. The above pattern +// will be used for the other users. If we form a mulw we'll keep the ANDs alive +// and they'll still become SLLI+SRLI. +def : Pat<(sext_inreg (mul (and GPR:$rs1, 0xffffffff), +                           (and GPR:$rs2, 0xffffffff)), i32), +          (ADDIW (MULHU (SLLI GPR:$rs1, 32), (SLLI GPR:$rs2, 32)), 0)>; +} // Predicates = [HasStdExtM, IsRV64, NotHasStdExtZba] |