Diffstat (limited to 'llvm/lib/Target/PowerPC/PPCInstrAltivec.td')
 -rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrAltivec.td | 79
 1 file changed, 41 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 1a34aa09315b..2bc7fb2a1a5f 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -30,11 +30,6 @@
// Altivec transformation functions and pattern fragments.
//
-// Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
-// of that type.
-def vnot_ppc : PatFrag<(ops node:$in),
- (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
-
def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
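The hunk above removes the PPC-local vnot_ppc fragment; the remaining hunks switch its users over to the target-independent vnot fragment provided by TargetSelectionDAG.td. As a rough sketch (paraphrased rather than copied from the tree), that generic fragment amounts to:

// Target-independent "vector not": xor with an all-ones vector. Sketch of the
// fragment the patterns below now use in place of vnot_ppc.
def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;

Since immAllOnesV is matched independently of how the all-ones build vector is typed, this presumably makes the v16i8 canonicalization note in the deleted comment unnecessary.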
@@ -352,7 +347,7 @@ def DSS : DSS_Form<0, 822, (outs), (ins u5imm:$STRM),
}
def DSSALL : DSS_Form<1, 822, (outs), (ins),
- "dssall", IIC_LdStLoad /*FIXME*/, [(int_ppc_altivec_dssall)]>,
+ "dssall", IIC_LdStLoad /*FIXME*/, []>,
Deprecated<DeprecatedDST> {
let STRM = 0;
let A = 0;
@@ -416,46 +411,46 @@ let hasSideEffects = 1 in {
let PPC970_Unit = 2, mayLoad = 1, mayStore = 0 in { // Loads.
def LVEBX: XForm_1_memOp<31, 7, (outs vrrc:$vD), (ins memrr:$src),
"lvebx $vD, $src", IIC_LdStLoad,
- [(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
+ [(set v16i8:$vD, (int_ppc_altivec_lvebx ForceXForm:$src))]>;
def LVEHX: XForm_1_memOp<31, 39, (outs vrrc:$vD), (ins memrr:$src),
"lvehx $vD, $src", IIC_LdStLoad,
- [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
+ [(set v8i16:$vD, (int_ppc_altivec_lvehx ForceXForm:$src))]>;
def LVEWX: XForm_1_memOp<31, 71, (outs vrrc:$vD), (ins memrr:$src),
"lvewx $vD, $src", IIC_LdStLoad,
- [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
+ [(set v4i32:$vD, (int_ppc_altivec_lvewx ForceXForm:$src))]>;
def LVX : XForm_1_memOp<31, 103, (outs vrrc:$vD), (ins memrr:$src),
"lvx $vD, $src", IIC_LdStLoad,
- [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
+ [(set v4i32:$vD, (int_ppc_altivec_lvx ForceXForm:$src))]>;
def LVXL : XForm_1_memOp<31, 359, (outs vrrc:$vD), (ins memrr:$src),
"lvxl $vD, $src", IIC_LdStLoad,
- [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
+ [(set v4i32:$vD, (int_ppc_altivec_lvxl ForceXForm:$src))]>;
}
def LVSL : XForm_1_memOp<31, 6, (outs vrrc:$vD), (ins memrr:$src),
"lvsl $vD, $src", IIC_LdStLoad,
- [(set v16i8:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
+ [(set v16i8:$vD, (int_ppc_altivec_lvsl ForceXForm:$src))]>,
PPC970_Unit_LSU;
def LVSR : XForm_1_memOp<31, 38, (outs vrrc:$vD), (ins memrr:$src),
"lvsr $vD, $src", IIC_LdStLoad,
- [(set v16i8:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
+ [(set v16i8:$vD, (int_ppc_altivec_lvsr ForceXForm:$src))]>,
PPC970_Unit_LSU;
let PPC970_Unit = 2, mayStore = 1, mayLoad = 0 in { // Stores.
def STVEBX: XForm_8_memOp<31, 135, (outs), (ins vrrc:$rS, memrr:$dst),
"stvebx $rS, $dst", IIC_LdStStore,
- [(int_ppc_altivec_stvebx v16i8:$rS, xoaddr:$dst)]>;
+ [(int_ppc_altivec_stvebx v16i8:$rS, ForceXForm:$dst)]>;
def STVEHX: XForm_8_memOp<31, 167, (outs), (ins vrrc:$rS, memrr:$dst),
"stvehx $rS, $dst", IIC_LdStStore,
- [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
+ [(int_ppc_altivec_stvehx v8i16:$rS, ForceXForm:$dst)]>;
def STVEWX: XForm_8_memOp<31, 199, (outs), (ins vrrc:$rS, memrr:$dst),
"stvewx $rS, $dst", IIC_LdStStore,
- [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
+ [(int_ppc_altivec_stvewx v4i32:$rS, ForceXForm:$dst)]>;
def STVX : XForm_8_memOp<31, 231, (outs), (ins vrrc:$rS, memrr:$dst),
"stvx $rS, $dst", IIC_LdStStore,
- [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
+ [(int_ppc_altivec_stvx v4i32:$rS, ForceXForm:$dst)]>;
def STVXL : XForm_8_memOp<31, 487, (outs), (ins vrrc:$rS, memrr:$dst),
"stvxl $rS, $dst", IIC_LdStStore,
- [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
+ [(int_ppc_altivec_stvxl v4i32:$rS, ForceXForm:$dst)]>;
}
let PPC970_Unit = 5 in { // VALU Operations.
@@ -521,7 +516,7 @@ def VAND : VXForm_1<1028, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
def VANDC : VXForm_1<1092, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"vandc $vD, $vA, $vB", IIC_VecFP,
[(set v4i32:$vD, (and v4i32:$vA,
- (vnot_ppc v4i32:$vB)))]>;
+ (vnot v4i32:$vB)))]>;
def VCFSX : VXForm_1<842, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
"vcfsx $vD, $vB, $UIMM", IIC_VecFP,
@@ -684,8 +679,8 @@ let hasSideEffects = 1 in {
def VNOR : VXForm_1<1284, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"vnor $vD, $vA, $vB", IIC_VecFP,
- [(set v4i32:$vD, (vnot_ppc (or v4i32:$vA,
- v4i32:$vB)))]>;
+ [(set v4i32:$vD, (vnot (or v4i32:$vA,
+ v4i32:$vB)))]>;
let isCommutable = 1 in {
def VOR : VXForm_1<1156, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"vor $vD, $vA, $vB", IIC_VecFP,
@@ -870,6 +865,13 @@ def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
def : InstAlias<"vmr $vD, $vA", (VOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
def : InstAlias<"vnot $vD, $vA", (VNOR vrrc:$vD, vrrc:$vA, vrrc:$vA)>;
+// This is a nop on all supported architectures and the AIX assembler
+// doesn't support it (and will not be updated to support it).
+let Predicates = [IsAIX] in
+def : Pat<(int_ppc_altivec_dssall), (NOP)>;
+let Predicates = [NotAIX] in
+def : Pat<(int_ppc_altivec_dssall), (DSSALL)>;
+
// Rotates.
def : Pat<(v16i8 (rotl v16i8:$vA, v16i8:$vB)),
(v16i8 (VRLB v16i8:$vA, v16i8:$vB))>;
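The dssall hunk above stops matching int_ppc_altivec_dssall directly on the DSSALL instruction and instead selects it by pattern: a plain NOP under IsAIX (the AIX assembler rejects dssall) and the real DSSALL under NotAIX. Those are ordinary subtarget predicates defined elsewhere in the PPC backend; a minimal sketch, assuming the usual PPCSubtarget::isAIXABI() hook and not copied verbatim from the backend, might look like:

// Hypothetical sketch of the OS predicates gating the dssall patterns above;
// the actual definitions live in the backend's other .td files.
def IsAIX  : Predicate<"Subtarget->isAIXABI()">;
def NotAIX : Predicate<"!Subtarget->isAIXABI()">;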
@@ -899,11 +901,11 @@ def : Pat<(v4i32 (ssubsat v4i32:$vA, v4i32:$vB)), (v4i32 (VSUBSWS $vA, $vB))>;
def : Pat<(v4i32 (usubsat v4i32:$vA, v4i32:$vB)), (v4i32 (VSUBUWS $vA, $vB))>;
// Loads.
-def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;
+def : Pat<(v4i32 (load ForceXForm:$src)), (LVX ForceXForm:$src)>;
// Stores.
-def : Pat<(store v4i32:$rS, xoaddr:$dst),
- (STVX $rS, xoaddr:$dst)>;
+def : Pat<(store v4i32:$rS, ForceXForm:$dst),
+ (STVX $rS, ForceXForm:$dst)>;
// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
@@ -1034,11 +1036,11 @@ def:Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
(VMRGHW $vB, $vA)>;
// Logical Operations
-def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>;
+def : Pat<(vnot v4i32:$vA), (VNOR $vA, $vA)>;
-def : Pat<(vnot_ppc (or v4i32:$A, v4i32:$B)),
+def : Pat<(vnot (or v4i32:$A, v4i32:$B)),
(VNOR $A, $B)>;
-def : Pat<(and v4i32:$A, (vnot_ppc v4i32:$B)),
+def : Pat<(and v4i32:$A, (vnot v4i32:$B)),
(VANDC $A, $B)>;
def : Pat<(fmul v4f32:$vA, v4f32:$vB),
@@ -1140,19 +1142,21 @@ def : Pat<(v4f32 (vselect v4i32:$vA, v4f32:$vB, v4f32:$vC)),
(VSEL $vC, $vB, $vA)>;
def : Pat<(v2f64 (vselect v2i64:$vA, v2f64:$vB, v2f64:$vC)),
(VSEL $vC, $vB, $vA)>;
+def : Pat<(v1i128 (vselect v1i128:$vA, v1i128:$vB, v1i128:$vC)),
+ (VSEL $vC, $vB, $vA)>;
// Vector Integer Average Instructions
-def : Pat<(v4i32 (sra (sub v4i32:$vA, (vnot_ppc v4i32:$vB)),
+def : Pat<(v4i32 (sra (sub v4i32:$vA, (vnot v4i32:$vB)),
(v4i32 (immEQOneV)))), (v4i32 (VAVGSW $vA, $vB))>;
-def : Pat<(v8i16 (sra (sub v8i16:$vA, (v8i16 (bitconvert(vnot_ppc v4i32:$vB)))),
+def : Pat<(v8i16 (sra (sub v8i16:$vA, (v8i16 (bitconvert(vnot v4i32:$vB)))),
(v8i16 (immEQOneV)))), (v8i16 (VAVGSH $vA, $vB))>;
-def : Pat<(v16i8 (sra (sub v16i8:$vA, (v16i8 (bitconvert(vnot_ppc v4i32:$vB)))),
+def : Pat<(v16i8 (sra (sub v16i8:$vA, (v16i8 (bitconvert(vnot v4i32:$vB)))),
(v16i8 (immEQOneV)))), (v16i8 (VAVGSB $vA, $vB))>;
-def : Pat<(v4i32 (srl (sub v4i32:$vA, (vnot_ppc v4i32:$vB)),
+def : Pat<(v4i32 (srl (sub v4i32:$vA, (vnot v4i32:$vB)),
(v4i32 (immEQOneV)))), (v4i32 (VAVGUW $vA, $vB))>;
-def : Pat<(v8i16 (srl (sub v8i16:$vA, (v8i16 (bitconvert(vnot_ppc v4i32:$vB)))),
+def : Pat<(v8i16 (srl (sub v8i16:$vA, (v8i16 (bitconvert(vnot v4i32:$vB)))),
(v8i16 (immEQOneV)))), (v8i16 (VAVGUH $vA, $vB))>;
-def : Pat<(v16i8 (srl (sub v16i8:$vA, (v16i8 (bitconvert(vnot_ppc v4i32:$vB)))),
+def : Pat<(v16i8 (srl (sub v16i8:$vA, (v16i8 (bitconvert(vnot v4i32:$vB)))),
(v16i8 (immEQOneV)))), (v16i8 (VAVGUB $vA, $vB))>;
} // end HasAltivec
@@ -1299,16 +1303,16 @@ let isCommutable = 1 in {
// if we find situations where Altivec is really preferred over VSX.
def VEQV : VXForm_1<1668, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"veqv $vD, $vA, $vB", IIC_VecGeneral,
- [(set v4i32:$vD, (vnot_ppc (xor v4i32:$vA, v4i32:$vB)))]>;
+ [(set v4i32:$vD, (vnot (xor v4i32:$vA, v4i32:$vB)))]>;
def VNAND : VXForm_1<1412, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"vnand $vD, $vA, $vB", IIC_VecGeneral,
- [(set v4i32:$vD, (vnot_ppc (and v4i32:$vA, v4i32:$vB)))]>;
+ [(set v4i32:$vD, (vnot (and v4i32:$vA, v4i32:$vB)))]>;
} // isCommutable
def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
"vorc $vD, $vA, $vB", IIC_VecGeneral,
[(set v4i32:$vD, (or v4i32:$vA,
- (vnot_ppc v4i32:$vB)))]>;
+ (vnot v4i32:$vB)))]>;
// i64 element comparisons.
def VCMPEQUD : VCMP <199, "vcmpequd $vD, $vA, $vB" , v2i64>;
@@ -1503,8 +1507,7 @@ def VNEGW : VX_VT5_EO5_VB5<1538, 6, "vnegw",
def VNEGD : VX_VT5_EO5_VB5<1538, 7, "vnegd",
[(set v2i64:$vD,
- (sub (v2i64 (bitconvert (v4i32 immAllZerosV))),
- v2i64:$vB))]>;
+ (sub (v2i64 immAllZerosV), v2i64:$vB))]>;
// Vector Parity Byte
def VPRTYBW : VX_VT5_EO5_VB5<1538, 8, "vprtybw", [(set v4i32:$vD,