diff options
author | Dimitry Andric <dim@FreeBSD.org> | 2020-01-17 20:45:01 +0000 |
---|---|---|
committer | Dimitry Andric <dim@FreeBSD.org> | 2020-01-17 20:45:01 +0000 |
commit | 706b4fc47bbc608932d3b491ae19a3b9cde9497b (patch) | |
tree | 4adf86a776049cbf7f69a1929c4babcbbef925eb /llvm/lib/Target/X86/X86InstrAVX512.td | |
parent | 7cc9cf2bf09f069cb2dd947ead05d0b54301fb71 (diff) | |
download | src-706b4fc47bbc608932d3b491ae19a3b9cde9497b.tar.gz src-706b4fc47bbc608932d3b491ae19a3b9cde9497b.zip |
Vendor import of llvm-project master e26a78e70, the last commit before
the llvmorg-11-init tag, from which release/10.x was branched.
Tag: vendor/llvm-project/llvmorg-10-init-17466-ge26a78e7085
Notes
Notes:
svn path=/vendor/llvm-project/master/; revision=356843
svn path=/vendor/llvm-project/llvmorg-10-init-17466-ge26a78e7085/; revision=356844; tag=vendor/llvm-project/llvmorg-10-init-17466-ge26a78e7085
Diffstat (limited to 'llvm/lib/Target/X86/X86InstrAVX512.td')
-rw-r--r-- | llvm/lib/Target/X86/X86InstrAVX512.td | 705 |
1 file changed, 304 insertions, 401 deletions
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index 9b5de59430a5..32f012033fb0 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -2078,7 +2078,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, "$cc, $src2, $src1", "$src1, $src2, $cc", (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc), (OpNode_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), - timm:$cc)>, EVEX_4V, VEX_LIG, Sched<[sched]>; + timm:$cc)>, EVEX_4V, VEX_LIG, Sched<[sched]>, SIMD_EXC; let mayLoad = 1 in defm rm_Int : AVX512_maskable_cmp<0xC2, MRMSrcMem, _, (outs _.KRC:$dst), @@ -2089,8 +2089,9 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, timm:$cc), (OpNode_su (_.VT _.RC:$src1), _.ScalarIntMemCPat:$src2, timm:$cc)>, EVEX_4V, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; + let Uses = [MXCSR] in defm rrb_Int : AVX512_maskable_cmp<0xC2, MRMSrcReg, _, (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, u8imm:$cc), @@ -2111,7 +2112,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, [(set _.KRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2, timm:$cc))]>, - EVEX_4V, VEX_LIG, Sched<[sched]>; + EVEX_4V, VEX_LIG, Sched<[sched]>, SIMD_EXC; def rm : AVX512Ii8<0xC2, MRMSrcMem, (outs _.KRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2, u8imm:$cc), @@ -2121,7 +2122,7 @@ multiclass avx512_cmp_scalar<X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, (_.ScalarLdFrag addr:$src2), timm:$cc))]>, EVEX_4V, VEX_LIG, EVEX_CD8<_.EltSize, CD8VT1>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } } @@ -2522,11 +2523,12 @@ def X86cmpm_imm_commute : SDNodeXForm<timm, [{ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _, string Name> { +let Uses = [MXCSR], mayRaiseFPException 
= 1 in { defm rri : AVX512_maskable_cmp<0xC2, MRMSrcReg, _, (outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2,u8imm:$cc), "vcmp"#_.Suffix, "$cc, $src2, $src1", "$src1, $src2, $cc", - (X86cmpm (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc), + (X86any_cmpm (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc), (X86cmpm_su (_.VT _.RC:$src1), (_.VT _.RC:$src2), timm:$cc), 1>, Sched<[sched]>; @@ -2534,8 +2536,8 @@ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _, (outs _.KRC:$dst),(ins _.RC:$src1, _.MemOp:$src2, u8imm:$cc), "vcmp"#_.Suffix, "$cc, $src2, $src1", "$src1, $src2, $cc", - (X86cmpm (_.VT _.RC:$src1), (_.VT (_.LdFrag addr:$src2)), - timm:$cc), + (X86any_cmpm (_.VT _.RC:$src1), (_.VT (_.LdFrag addr:$src2)), + timm:$cc), (X86cmpm_su (_.VT _.RC:$src1), (_.VT (_.LdFrag addr:$src2)), timm:$cc)>, Sched<[sched.Folded, sched.ReadAfterFold]>; @@ -2546,17 +2548,18 @@ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _, "vcmp"#_.Suffix, "$cc, ${src2}"#_.BroadcastStr#", $src1", "$src1, ${src2}"#_.BroadcastStr#", $cc", - (X86cmpm (_.VT _.RC:$src1), - (_.VT (_.BroadcastLdFrag addr:$src2)), - timm:$cc), + (X86any_cmpm (_.VT _.RC:$src1), + (_.VT (_.BroadcastLdFrag addr:$src2)), + timm:$cc), (X86cmpm_su (_.VT _.RC:$src1), (_.VT (_.BroadcastLdFrag addr:$src2)), timm:$cc)>, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + } // Patterns for selecting with loads in other operand. 
- def : Pat<(X86cmpm (_.LdFrag addr:$src2), (_.VT _.RC:$src1), - timm:$cc), + def : Pat<(X86any_cmpm (_.LdFrag addr:$src2), (_.VT _.RC:$src1), + timm:$cc), (!cast<Instruction>(Name#_.ZSuffix#"rmi") _.RC:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>; @@ -2567,8 +2570,8 @@ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _, _.RC:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>; - def : Pat<(X86cmpm (_.BroadcastLdFrag addr:$src2), - (_.VT _.RC:$src1), timm:$cc), + def : Pat<(X86any_cmpm (_.BroadcastLdFrag addr:$src2), + (_.VT _.RC:$src1), timm:$cc), (!cast<Instruction>(Name#_.ZSuffix#"rmbi") _.RC:$src1, addr:$src2, (X86cmpm_imm_commute timm:$cc))>; @@ -2582,6 +2585,7 @@ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _, multiclass avx512_vcmp_sae<X86FoldableSchedWrite sched, X86VectorVTInfo _> { // comparison code form (VCMP[EQ/LT/LE/...] + let Uses = [MXCSR] in defm rrib : AVX512_maskable_cmp<0xC2, MRMSrcReg, _, (outs _.KRC:$dst),(ins _.RC:$src1, _.RC:$src2, u8imm:$cc), "vcmp"#_.Suffix, @@ -2639,7 +2643,7 @@ def X86Vfpclass_su : PatFrag<(ops node:$src1, node:$src2), multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _, Predicate prd> { - let Predicates = [prd], ExeDomain = _.ExeDomain in { + let Predicates = [prd], ExeDomain = _.ExeDomain, Uses = [MXCSR] in { def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src1, i32u8imm:$src2), OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -2679,7 +2683,7 @@ multiclass avx512_scalar_fpclass<bits<8> opc, string OpcodeStr, multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _, string mem>{ - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in { def rr : AVX512<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src1, i32u8imm:$src2), OpcodeStr##_.Suffix#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ 
-3197,8 +3201,8 @@ def : Pat<(Narrow.KVT (and Narrow.KRC:$mask, multiclass axv512_cmp_packed_cc_no_vlx_lowering<string InstStr, X86VectorVTInfo Narrow, X86VectorVTInfo Wide> { -def : Pat<(Narrow.KVT (X86cmpm (Narrow.VT Narrow.RC:$src1), - (Narrow.VT Narrow.RC:$src2), timm:$cc)), +def : Pat<(Narrow.KVT (X86any_cmpm (Narrow.VT Narrow.RC:$src1), + (Narrow.VT Narrow.RC:$src2), timm:$cc)), (COPY_TO_REGCLASS (!cast<Instruction>(InstStr#"Zrri") (Wide.VT (INSERT_SUBREG (IMPLICIT_DEF), Narrow.RC:$src1, Narrow.SubRegIdx)), @@ -3215,8 +3219,8 @@ def : Pat<(Narrow.KVT (and Narrow.KRC:$mask, timm:$cc), Narrow.KRC)>; // Broadcast load. -def : Pat<(Narrow.KVT (X86cmpm (Narrow.VT Narrow.RC:$src1), - (Narrow.VT (Narrow.BroadcastLdFrag addr:$src2)), timm:$cc)), +def : Pat<(Narrow.KVT (X86any_cmpm (Narrow.VT Narrow.RC:$src1), + (Narrow.VT (Narrow.BroadcastLdFrag addr:$src2)), timm:$cc)), (COPY_TO_REGCLASS (!cast<Instruction>(InstStr#"Zrmbi") (Wide.VT (INSERT_SUBREG (IMPLICIT_DEF), Narrow.RC:$src1, Narrow.SubRegIdx)), @@ -3231,8 +3235,8 @@ def : Pat<(Narrow.KVT (and Narrow.KRC:$mask, addr:$src2, timm:$cc), Narrow.KRC)>; // Commuted with broadcast load. -def : Pat<(Narrow.KVT (X86cmpm (Narrow.VT (Narrow.BroadcastLdFrag addr:$src2)), - (Narrow.VT Narrow.RC:$src1), timm:$cc)), +def : Pat<(Narrow.KVT (X86any_cmpm (Narrow.VT (Narrow.BroadcastLdFrag addr:$src2)), + (Narrow.VT Narrow.RC:$src1), timm:$cc)), (COPY_TO_REGCLASS (!cast<Instruction>(InstStr#"Zrmbi") (Wide.VT (INSERT_SUBREG (IMPLICIT_DEF), Narrow.RC:$src1, Narrow.SubRegIdx)), @@ -3928,6 +3932,17 @@ def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}", def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}", (VMOVPQIto64Zrr GR64:$dst, VR128X:$src), 0>; +// Conversions between masks and scalar fp. 
+def : Pat<(v32i1 (bitconvert FR32X:$src)), + (KMOVDkr (VMOVSS2DIZrr FR32X:$src))>; +def : Pat<(f32 (bitconvert VK32:$src)), + (VMOVDI2SSZrr (KMOVDrk VK32:$src))>; + +def : Pat<(v64i1 (bitconvert FR64X:$src)), + (KMOVQkr (VMOVSDto64Zrr FR64X:$src))>; +def : Pat<(f64 (bitconvert VK64:$src)), + (VMOV64toSDZrr (KMOVQrk VK64:$src))>; + //===----------------------------------------------------------------------===// // AVX-512 MOVSS, MOVSD //===----------------------------------------------------------------------===// @@ -5278,7 +5293,7 @@ defm : avx512_logical_lowering_types<"VPANDN", X86andnp>; multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, SDNode OpNode, SDNode VecNode, X86FoldableSchedWrite sched, bit IsCommutable> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -5312,7 +5327,7 @@ multiclass avx512_fp_scalar<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, SDNode VecNode, X86FoldableSchedWrite sched, bit IsCommutable = 0> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr, "$rc, $src2, $src1", "$src1, $src2, $rc", @@ -5329,16 +5344,17 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, (ins _.RC:$src1, _.RC:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", (_.VT (VecNode _.RC:$src1, _.RC:$src2))>, - Sched<[sched]>; + Sched<[sched]>, SIMD_EXC; defm rm_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", (_.VT (VecNode _.RC:$src1, 
_.ScalarIntMemCPat:$src2))>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; - let isCodeGenOnly = 1, Predicates = [HasAVX512] in { + let isCodeGenOnly = 1, Predicates = [HasAVX512], + Uses = [MXCSR], mayRaiseFPException = 1 in { def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst), (ins _.FRC:$src1, _.FRC:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -5356,6 +5372,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, EVEX2VEXOverride<EVEX2VexOvrd#"rm">; } + let Uses = [MXCSR] in defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr, "{sae}, $src2, $src1", "$src1, $src2, {sae}", @@ -5391,13 +5408,13 @@ multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode, NAME#"SD">, XD, VEX_W, EVEX_4V, VEX_LIG, EVEX_CD8<64, CD8VT1>; } -defm VADD : avx512_binop_s_round<0x58, "vadd", fadd, X86fadds, X86faddRnds, +defm VADD : avx512_binop_s_round<0x58, "vadd", any_fadd, X86fadds, X86faddRnds, SchedWriteFAddSizes, 1>; -defm VMUL : avx512_binop_s_round<0x59, "vmul", fmul, X86fmuls, X86fmulRnds, +defm VMUL : avx512_binop_s_round<0x59, "vmul", any_fmul, X86fmuls, X86fmulRnds, SchedWriteFMulSizes, 1>; -defm VSUB : avx512_binop_s_round<0x5C, "vsub", fsub, X86fsubs, X86fsubRnds, +defm VSUB : avx512_binop_s_round<0x5C, "vsub", any_fsub, X86fsubs, X86fsubRnds, SchedWriteFAddSizes, 0>; -defm VDIV : avx512_binop_s_round<0x5E, "vdiv", fdiv, X86fdivs, X86fdivRnds, +defm VDIV : avx512_binop_s_round<0x5E, "vdiv", any_fdiv, X86fdivs, X86fdivRnds, SchedWriteFDivSizes, 0>; defm VMIN : avx512_binop_s_sae<0x5D, "vmin", X86fmin, X86fmins, X86fminSAEs, SchedWriteFCmpSizes, 0>; @@ -5429,27 +5446,28 @@ multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr, } defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc, SchedWriteFCmp.Scl, "VMINCSS">, XS, - EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>; + 
EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC; defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc, SchedWriteFCmp.Scl, "VMINCSD">, XD, VEX_W, EVEX_4V, VEX_LIG, - EVEX_CD8<64, CD8VT1>; + EVEX_CD8<64, CD8VT1>, SIMD_EXC; defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc, SchedWriteFCmp.Scl, "VMAXCSS">, XS, - EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>; + EVEX_4V, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC; defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc, SchedWriteFCmp.Scl, "VMAXCSD">, XD, VEX_W, EVEX_4V, VEX_LIG, - EVEX_CD8<64, CD8VT1>; + EVEX_CD8<64, CD8VT1>, SIMD_EXC; multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, X86VectorVTInfo _, X86FoldableSchedWrite sched, bit IsCommutable, bit IsKCommutable = IsCommutable> { - let ExeDomain = _.ExeDomain, hasSideEffects = 0 in { + let ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR], mayRaiseFPException = 1 in { defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2", @@ -5476,7 +5494,7 @@ multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpN multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeRnd, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rrb: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr##_.Suffix, "$rc, $src2, $src1", "$src1, $src2, $rc", @@ -5487,7 +5505,7 @@ multiclass avx512_fp_round_packed<bits<8> opc, string OpcodeStr, multiclass avx512_fp_sae_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNodeSAE, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rrb: AVX512_maskable<opc, MRMSrcReg, _, (outs 
_.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix, "{sae}, $src2, $src1", "$src1, $src2, {sae}", @@ -5526,6 +5544,7 @@ multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator Op } } +let Uses = [MXCSR] in multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd, X86SchedWriteSizes sched> { defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM, @@ -5536,6 +5555,7 @@ multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeR EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>; } +let Uses = [MXCSR] in multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd, X86SchedWriteSizes sched> { defm PSZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM, @@ -5546,16 +5566,16 @@ multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd EVEX_V512, PD, VEX_W,EVEX_CD8<64, CD8VF>; } -defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, HasAVX512, +defm VADD : avx512_fp_binop_p<0x58, "vadd", any_fadd, HasAVX512, SchedWriteFAddSizes, 1>, avx512_fp_binop_p_round<0x58, "vadd", X86faddRnd, SchedWriteFAddSizes>; -defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, HasAVX512, +defm VMUL : avx512_fp_binop_p<0x59, "vmul", any_fmul, HasAVX512, SchedWriteFMulSizes, 1>, avx512_fp_binop_p_round<0x59, "vmul", X86fmulRnd, SchedWriteFMulSizes>; -defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub, HasAVX512, +defm VSUB : avx512_fp_binop_p<0x5C, "vsub", any_fsub, HasAVX512, SchedWriteFAddSizes>, avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd, SchedWriteFAddSizes>; -defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512, +defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", any_fdiv, HasAVX512, SchedWriteFDivSizes>, avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd, SchedWriteFDivSizes>; defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512, @@ -5570,6 +5590,7 @@ let isCodeGenOnly = 1 in { defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, 
HasAVX512, SchedWriteFCmpSizes, 1>; } +let Uses = []<Register>, mayRaiseFPException = 0 in { defm VAND : avx512_fp_binop_p<0x54, "vand", null_frag, HasDQI, SchedWriteFLogicSizes, 1>; defm VANDN : avx512_fp_binop_p<0x55, "vandn", null_frag, HasDQI, @@ -5578,10 +5599,11 @@ defm VOR : avx512_fp_binop_p<0x56, "vor", null_frag, HasDQI, SchedWriteFLogicSizes, 1>; defm VXOR : avx512_fp_binop_p<0x57, "vxor", null_frag, HasDQI, SchedWriteFLogicSizes, 1>; +} multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2", @@ -5603,7 +5625,7 @@ multiclass avx512_fp_scalef_p<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_fp_scalef_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rr: AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2", @@ -6399,7 +6421,8 @@ let Predicates = [HasAVX512] in { multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in { + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR], mayRaiseFPException = 1 in { defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", @@ -6425,7 +6448,8 @@ multiclass avx512_fma3p_213_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass 
avx512_fma3_213_round<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR] in defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc), OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", @@ -6462,7 +6486,7 @@ multiclass avx512_fma3p_213_f<bits<8> opc, string OpcodeStr, SDNode OpNode, VEX_W; } -defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", X86Fmadd, X86FmaddRnd>; +defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", X86any_Fmadd, X86FmaddRnd>; defm VFMSUB213 : avx512_fma3p_213_f<0xAA, "vfmsub213", X86Fmsub, X86FmsubRnd>; defm VFMADDSUB213 : avx512_fma3p_213_f<0xA6, "vfmaddsub213", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD213 : avx512_fma3p_213_f<0xA7, "vfmsubadd213", X86Fmsubadd, X86FmsubaddRnd>; @@ -6473,7 +6497,8 @@ defm VFNMSUB213 : avx512_fma3p_213_f<0xAE, "vfnmsub213", X86Fnmsub, X86FnmsubR multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in { + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR], mayRaiseFPException = 1 in { defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", @@ -6500,7 +6525,8 @@ multiclass avx512_fma3p_231_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_fma3_231_round<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, 
hasSideEffects = 0, + Uses = [MXCSR] in defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc), OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", @@ -6538,7 +6564,7 @@ multiclass avx512_fma3p_231_f<bits<8> opc, string OpcodeStr, SDNode OpNode, VEX_W; } -defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", X86Fmadd, X86FmaddRnd>; +defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", X86any_Fmadd, X86FmaddRnd>; defm VFMSUB231 : avx512_fma3p_231_f<0xBA, "vfmsub231", X86Fmsub, X86FmsubRnd>; defm VFMADDSUB231 : avx512_fma3p_231_f<0xB6, "vfmaddsub231", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD231 : avx512_fma3p_231_f<0xB7, "vfmsubadd231", X86Fmsubadd, X86FmsubaddRnd>; @@ -6548,7 +6574,8 @@ defm VFNMSUB231 : avx512_fma3p_231_f<0xBE, "vfnmsub231", X86Fnmsub, X86FnmsubR multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in { + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR], mayRaiseFPException = 1 in { defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", @@ -6578,7 +6605,8 @@ multiclass avx512_fma3p_132_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_fma3_132_round<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _, string Suff> { - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0 in + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, hasSideEffects = 0, + Uses = [MXCSR] in defm rb: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc), OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", @@ -6616,7 +6644,7 @@ multiclass avx512_fma3p_132_f<bits<8> opc, string 
OpcodeStr, SDNode OpNode, VEX_W; } -defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", X86Fmadd, X86FmaddRnd>; +defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", X86any_Fmadd, X86FmaddRnd>; defm VFMSUB132 : avx512_fma3p_132_f<0x9A, "vfmsub132", X86Fmsub, X86FmsubRnd>; defm VFMADDSUB132 : avx512_fma3p_132_f<0x96, "vfmaddsub132", X86Fmaddsub, X86FmaddsubRnd>; defm VFMSUBADD132 : avx512_fma3p_132_f<0x97, "vfmsubadd132", X86Fmsubadd, X86FmsubaddRnd>; @@ -6630,14 +6658,15 @@ let Constraints = "$src1 = $dst", hasSideEffects = 0 in { defm r_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", (null_frag), 1, 1>, - AVX512FMA3Base, Sched<[SchedWriteFMA.Scl]>; + AVX512FMA3Base, Sched<[SchedWriteFMA.Scl]>, SIMD_EXC; let mayLoad = 1 in defm m_Int: AVX512_maskable_3src_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.IntScalarMemOp:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", (null_frag), 1, 1>, - AVX512FMA3Base, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>; + AVX512FMA3Base, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, SIMD_EXC; + let Uses = [MXCSR] in defm rb_Int: AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, AVX512RC:$rc), OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", (null_frag), 1, 1>, @@ -6648,13 +6677,14 @@ let Constraints = "$src1 = $dst", hasSideEffects = 0 in { (ins _.FRC:$src1, _.FRC:$src2, _.FRC:$src3), !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), - !if(MaskOnlyReg, [], [RHS_r])>, Sched<[SchedWriteFMA.Scl]>; + !if(MaskOnlyReg, [], [RHS_r])>, Sched<[SchedWriteFMA.Scl]>, SIMD_EXC; def m : AVX512FMA3S<opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, _.ScalarMemOp:$src3), !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), - [RHS_m]>, Sched<[SchedWriteFMA.Scl.Folded, 
SchedWriteFMA.Scl.ReadAfterFold]>; + [RHS_m]>, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, SIMD_EXC; + let Uses = [MXCSR] in def rb : AVX512FMA3S<opc, MRMSrcReg, (outs _.FRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, _.FRC:$src3, AVX512RC:$rc), !strconcat(OpcodeStr, @@ -6711,7 +6741,7 @@ multiclass avx512_fma3s<bits<8> opc213, bits<8> opc231, bits<8> opc132, } } -defm VFMADD : avx512_fma3s<0xA9, 0xB9, 0x99, "vfmadd", X86Fmadd, X86FmaddRnd>; +defm VFMADD : avx512_fma3s<0xA9, 0xB9, 0x99, "vfmadd", X86any_Fmadd, X86FmaddRnd>; defm VFMSUB : avx512_fma3s<0xAB, 0xBB, 0x9B, "vfmsub", X86Fmsub, X86FmsubRnd>; defm VFNMADD : avx512_fma3s<0xAD, 0xBD, 0x9D, "vfnmadd", X86Fnmadd, X86FnmaddRnd>; defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86Fnmsub, X86FnmsubRnd>; @@ -6918,7 +6948,7 @@ multiclass avx512_scalar_fma_patterns<SDNode Op, SDNode RndOp, string Prefix, } } -defm : avx512_scalar_fma_patterns<X86Fmadd, X86FmaddRnd, "VFMADD", "SS", +defm : avx512_scalar_fma_patterns<X86any_Fmadd, X86FmaddRnd, "VFMADD", "SS", X86Movss, v4f32x_info, fp32imm0>; defm : avx512_scalar_fma_patterns<X86Fmsub, X86FmsubRnd, "VFMSUB", "SS", X86Movss, v4f32x_info, fp32imm0>; @@ -6927,7 +6957,7 @@ defm : avx512_scalar_fma_patterns<X86Fnmadd, X86FnmaddRnd, "VFNMADD", "SS", defm : avx512_scalar_fma_patterns<X86Fnmsub, X86FnmsubRnd, "VFNMSUB", "SS", X86Movss, v4f32x_info, fp32imm0>; -defm : avx512_scalar_fma_patterns<X86Fmadd, X86FmaddRnd, "VFMADD", "SD", +defm : avx512_scalar_fma_patterns<X86any_Fmadd, X86FmaddRnd, "VFMADD", "SD", X86Movsd, v2f64x_info, fp64imm0>; defm : avx512_scalar_fma_patterns<X86Fmsub, X86FmsubRnd, "VFMSUB", "SD", X86Movsd, v2f64x_info, fp64imm0>; @@ -6997,7 +7027,10 @@ defm VPMADD52HUQ : avx512_pmadd52_common<0xb5, "vpmadd52huq", x86vpmadd52h, multiclass avx512_vcvtsi<bits<8> opc, SDPatternOperator OpNode, X86FoldableSchedWrite sched, RegisterClass SrcRC, X86VectorVTInfo DstVT, X86MemOperand x86memop, PatFrag ld_frag, string asm, - string mem> { + 
string mem, list<Register> _Uses = [MXCSR], + bit _mayRaiseFPException = 1> { +let ExeDomain = DstVT.ExeDomain, Uses = _Uses, + mayRaiseFPException = _mayRaiseFPException in { let hasSideEffects = 0, isCodeGenOnly = 1 in { def rr : SI<opc, MRMSrcReg, (outs DstVT.FRC:$dst), (ins DstVT.FRC:$src1, SrcRC:$src), @@ -7023,6 +7056,7 @@ multiclass avx512_vcvtsi<bits<8> opc, SDPatternOperator OpNode, X86FoldableSched (OpNode (DstVT.VT DstVT.RC:$src1), (ld_frag addr:$src2)))]>, EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; +} def : InstAlias<"v"#asm#mem#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", (!cast<Instruction>(NAME#"rr_Int") DstVT.RC:$dst, DstVT.RC:$src1, SrcRC:$src2), 0, "att">; @@ -7032,6 +7066,7 @@ multiclass avx512_vcvtsi_round<bits<8> opc, SDNode OpNode, X86FoldableSchedWrite sched, RegisterClass SrcRC, X86VectorVTInfo DstVT, string asm, string mem> { + let ExeDomain = DstVT.ExeDomain, Uses = [MXCSR] in def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc), !strconcat(asm, @@ -7066,7 +7101,7 @@ defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd, v4f32x_info, i64mem, loadi64, "cvtsi2ss", "q">, XS, VEX_W, EVEX_CD8<64, CD8VT1>; defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, null_frag, WriteCvtI2SD, GR32, - v2f64x_info, i32mem, loadi32, "cvtsi2sd", "l">, + v2f64x_info, i32mem, loadi32, "cvtsi2sd", "l", [], 0>, XD, VEX_LIG, EVEX_CD8<32, CD8VT1>; defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd, WriteCvtI2SD, GR64, @@ -7078,22 +7113,22 @@ def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}", def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}", (VCVTSI2SDZrm_Int VR128X:$dst, VR128X:$src1, i32mem:$src), 0, "att">; -def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))), +def : Pat<(f32 (any_sint_to_fp (loadi32 addr:$src))), (VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))), +def : Pat<(f32 (any_sint_to_fp 
(loadi64 addr:$src))), (VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))), +def : Pat<(f64 (any_sint_to_fp (loadi32 addr:$src))), (VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))), +def : Pat<(f64 (any_sint_to_fp (loadi64 addr:$src))), (VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f32 (sint_to_fp GR32:$src)), +def : Pat<(f32 (any_sint_to_fp GR32:$src)), (VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>; -def : Pat<(f32 (sint_to_fp GR64:$src)), +def : Pat<(f32 (any_sint_to_fp GR64:$src)), (VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>; -def : Pat<(f64 (sint_to_fp GR32:$src)), +def : Pat<(f64 (any_sint_to_fp GR32:$src)), (VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>; -def : Pat<(f64 (sint_to_fp GR64:$src)), +def : Pat<(f64 (any_sint_to_fp GR64:$src)), (VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>; defm VCVTUSI2SSZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd, @@ -7105,7 +7140,7 @@ defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd, v4f32x_info, i64mem, loadi64, "cvtusi2ss", "q">, XS, VEX_W, EVEX_CD8<64, CD8VT1>; defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, null_frag, WriteCvtI2SD, GR32, v2f64x_info, - i32mem, loadi32, "cvtusi2sd", "l">, + i32mem, loadi32, "cvtusi2sd", "l", [], 0>, XD, VEX_LIG, EVEX_CD8<32, CD8VT1>; defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd, WriteCvtI2SD, GR64, @@ -7117,22 +7152,22 @@ def : InstAlias<"vcvtusi2ss\t{$src, $src1, $dst|$dst, $src1, $src}", def : InstAlias<"vcvtusi2sd\t{$src, $src1, $dst|$dst, $src1, $src}", (VCVTUSI2SDZrm_Int VR128X:$dst, VR128X:$src1, i32mem:$src), 0, "att">; -def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))), +def : Pat<(f32 (any_uint_to_fp (loadi32 addr:$src))), (VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))), +def : Pat<(f32 (any_uint_to_fp (loadi64 addr:$src))), (VCVTUSI642SSZrm 
(f32 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))), +def : Pat<(f64 (any_uint_to_fp (loadi32 addr:$src))), (VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))), +def : Pat<(f64 (any_uint_to_fp (loadi64 addr:$src))), (VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>; -def : Pat<(f32 (uint_to_fp GR32:$src)), +def : Pat<(f32 (any_uint_to_fp GR32:$src)), (VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>; -def : Pat<(f32 (uint_to_fp GR64:$src)), +def : Pat<(f32 (any_uint_to_fp GR64:$src)), (VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>; -def : Pat<(f64 (uint_to_fp GR32:$src)), +def : Pat<(f64 (any_uint_to_fp GR32:$src)), (VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>; -def : Pat<(f64 (uint_to_fp GR64:$src)), +def : Pat<(f64 (any_uint_to_fp GR64:$src)), (VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>; } @@ -7145,11 +7180,12 @@ multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT, SDNode OpNodeRnd, X86FoldableSchedWrite sched, string asm, string aliasStr> { - let Predicates = [HasAVX512] in { + let Predicates = [HasAVX512], ExeDomain = SrcVT.ExeDomain in { def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src)))]>, - EVEX, VEX_LIG, Sched<[sched]>; + EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let Uses = [MXCSR] in def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc), !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"), [(set DstVT.RC:$dst, (OpNodeRnd (SrcVT.VT SrcVT.RC:$src),(i32 timm:$rc)))]>, @@ -7159,7 +7195,7 @@ multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT, !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.ScalarIntMemCPat:$src)))]>, - EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX, VEX_LIG, Sched<[sched.Folded, 
sched.ReadAfterFold]>, SIMD_EXC; } // Predicates = [HasAVX512] def : InstAlias<"v" # asm # aliasStr # "\t{$src, $dst|$dst, $src}", @@ -7202,82 +7238,82 @@ defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info, X86cvts2u let Predicates = [HasAVX512] in { def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))), + (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR64:$src)))))), (VCVTSI642SSZrr_Int VR128X:$dst, GR64:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi64 addr:$src))))))), + (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi64 addr:$src))))))), (VCVTSI642SSZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))), + (v4f32 (scalar_to_vector (f32 (any_sint_to_fp GR32:$src)))))), (VCVTSI2SSZrr_Int VR128X:$dst, GR32:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (sint_to_fp (loadi32 addr:$src))))))), + (v4f32 (scalar_to_vector (f32 (any_sint_to_fp (loadi32 addr:$src))))))), (VCVTSI2SSZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))), + (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR64:$src)))))), (VCVTSI642SDZrr_Int VR128X:$dst, GR64:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi64 addr:$src))))))), + (v2f64 (scalar_to_vector (f64 (any_sint_to_fp (loadi64 addr:$src))))))), (VCVTSI642SDZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))), + (v2f64 (scalar_to_vector (f64 (any_sint_to_fp GR32:$src)))))), (VCVTSI2SDZrr_Int VR128X:$dst, GR32:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (sint_to_fp (loadi32 addr:$src))))))), + (v2f64 
(scalar_to_vector (f64 (any_sint_to_fp (loadi32 addr:$src))))))), (VCVTSI2SDZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (uint_to_fp GR64:$src)))))), + (v4f32 (scalar_to_vector (f32 (any_uint_to_fp GR64:$src)))))), (VCVTUSI642SSZrr_Int VR128X:$dst, GR64:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (uint_to_fp (loadi64 addr:$src))))))), + (v4f32 (scalar_to_vector (f32 (any_uint_to_fp (loadi64 addr:$src))))))), (VCVTUSI642SSZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (uint_to_fp GR32:$src)))))), + (v4f32 (scalar_to_vector (f32 (any_uint_to_fp GR32:$src)))))), (VCVTUSI2SSZrr_Int VR128X:$dst, GR32:$src)>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), - (v4f32 (scalar_to_vector (f32 (uint_to_fp (loadi32 addr:$src))))))), + (v4f32 (scalar_to_vector (f32 (any_uint_to_fp (loadi32 addr:$src))))))), (VCVTUSI2SSZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (uint_to_fp GR64:$src)))))), + (v2f64 (scalar_to_vector (f64 (any_uint_to_fp GR64:$src)))))), (VCVTUSI642SDZrr_Int VR128X:$dst, GR64:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (uint_to_fp (loadi64 addr:$src))))))), + (v2f64 (scalar_to_vector (f64 (any_uint_to_fp (loadi64 addr:$src))))))), (VCVTUSI642SDZrm_Int VR128X:$dst, addr:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (uint_to_fp GR32:$src)))))), + (v2f64 (scalar_to_vector (f64 (any_uint_to_fp GR32:$src)))))), (VCVTUSI2SDZrr_Int VR128X:$dst, GR32:$src)>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), - (v2f64 (scalar_to_vector (f64 (uint_to_fp (loadi32 addr:$src))))))), + (v2f64 (scalar_to_vector (f64 (any_uint_to_fp (loadi32 addr:$src))))))), (VCVTUSI2SDZrm_Int VR128X:$dst, addr:$src)>; } // Predicates = [HasAVX512] @@ -7286,22 
+7322,23 @@ multiclass avx512_cvt_s_all<bits<8> opc, string asm, X86VectorVTInfo _SrcRC, X86VectorVTInfo _DstRC, SDNode OpNode, SDNode OpNodeInt, SDNode OpNodeSAE, X86FoldableSchedWrite sched, string aliasStr>{ -let Predicates = [HasAVX512] in { +let Predicates = [HasAVX512], ExeDomain = _SrcRC.ExeDomain in { let isCodeGenOnly = 1 in { def rr : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src))]>, - EVEX, VEX_LIG, Sched<[sched]>; + EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src)))]>, - EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } def rr_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNodeInt (_SrcRC.VT _SrcRC.RC:$src)))]>, - EVEX, VEX_LIG, Sched<[sched]>; + EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let Uses = [MXCSR] in def rrb_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src), !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"), [(set _DstRC.RC:$dst, (OpNodeSAE (_SrcRC.VT _SrcRC.RC:$src)))]>, @@ -7311,7 +7348,7 @@ let Predicates = [HasAVX512] in { !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set _DstRC.RC:$dst, (OpNodeInt (_SrcRC.VT _SrcRC.ScalarIntMemCPat:$src)))]>, - EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } //HasAVX512 def : InstAlias<asm # aliasStr # "\t{$src, $dst|$dst, $src}", @@ -7324,35 +7361,36 @@ let Predicates = [HasAVX512] in { } defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i32x_info, - fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, 
WriteCvtSS2I, + any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I, "{l}">, XS, EVEX_CD8<32, CD8VT1>; defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i64x_info, - fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I, + any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I, "{q}">, VEX_W, XS, EVEX_CD8<32, CD8VT1>; defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i32x_info, - fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I, + any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I, "{l}">, XD, EVEX_CD8<64, CD8VT1>; defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i64x_info, - fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I, + any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I, "{q}">, VEX_W, XD, EVEX_CD8<64, CD8VT1>; defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i32x_info, - fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I, + any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I, "{l}">, XS, EVEX_CD8<32, CD8VT1>; defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i64x_info, - fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I, + any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I, "{q}">, XS,VEX_W, EVEX_CD8<32, CD8VT1>; defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i32x_info, - fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I, + any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I, "{l}">, XD, EVEX_CD8<64, CD8VT1>; defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i64x_info, - fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I, + any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I, "{q}">, XD, VEX_W, EVEX_CD8<64, CD8VT1>; //===----------------------------------------------------------------------===// // AVX-512 Convert form float to double and back 
//===----------------------------------------------------------------------===// +let Uses = [MXCSR], mayRaiseFPException = 1 in multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86VectorVTInfo _Src, SDNode OpNode, X86FoldableSchedWrite sched> { @@ -7387,6 +7425,7 @@ multiclass avx512_cvt_fp_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _ multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86VectorVTInfo _Src, SDNode OpNodeSAE, X86FoldableSchedWrite sched> { + let Uses = [MXCSR] in defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _Src.RC:$src2), OpcodeStr, "{sae}, $src2, $src1", "$src1, $src2, {sae}", @@ -7399,6 +7438,7 @@ multiclass avx512_cvt_fp_sae_scalar<bits<8> opc, string OpcodeStr, X86VectorVTIn multiclass avx512_cvt_fp_rc_scalar<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86VectorVTInfo _Src, SDNode OpNodeRnd, X86FoldableSchedWrite sched> { + let Uses = [MXCSR] in defm rrb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _Src.RC:$src2, AVX512RC:$rc), OpcodeStr, "$rc, $src2, $src1", "$src1, $src2, $rc", @@ -7435,28 +7475,28 @@ defm VCVTSS2SD : avx512_cvt_fp_scalar_ss2sd<0x5A, "vcvtss2sd", X86fpexts, X86fpextsSAE, WriteCvtSS2SD, f32x_info, f64x_info>; -def : Pat<(f64 (fpextend FR32X:$src)), +def : Pat<(f64 (any_fpextend FR32X:$src)), (VCVTSS2SDZrr (f64 (IMPLICIT_DEF)), FR32X:$src)>, Requires<[HasAVX512]>; -def : Pat<(f64 (fpextend (loadf32 addr:$src))), +def : Pat<(f64 (any_fpextend (loadf32 addr:$src))), (VCVTSS2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512, OptForSize]>; -def : Pat<(f32 (fpround FR64X:$src)), +def : Pat<(f32 (any_fpround FR64X:$src)), (VCVTSD2SSZrr (f32 (IMPLICIT_DEF)), FR64X:$src)>, Requires<[HasAVX512]>; def : Pat<(v4f32 (X86Movss (v4f32 VR128X:$dst), (v4f32 (scalar_to_vector - (f32 (fpround (f64 (extractelt VR128X:$src, (iPTR 0))))))))), + (f32 (any_fpround (f64 
(extractelt VR128X:$src, (iPTR 0))))))))), (VCVTSD2SSZrr_Int VR128X:$dst, VR128X:$src)>, Requires<[HasAVX512]>; def : Pat<(v2f64 (X86Movsd (v2f64 VR128X:$dst), (v2f64 (scalar_to_vector - (f64 (fpextend (f32 (extractelt VR128X:$src, (iPTR 0))))))))), + (f64 (any_fpextend (f32 (extractelt VR128X:$src, (iPTR 0))))))))), (VCVTSS2SDZrr_Int VR128X:$dst, VR128X:$src)>, Requires<[HasAVX512]>; @@ -7472,7 +7512,7 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, string Alias = "", X86MemOperand MemOp = _Src.MemOp, RegisterClass MaskRC = _.KRCWM, dag LdDAG = (_.VT (OpNode (_Src.VT (_Src.LdFrag addr:$src))))> { - +let Uses = [MXCSR], mayRaiseFPException = 1 in { defm rr : AVX512_maskable_common<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _Src.RC:$src), (ins _.RC:$src0, MaskRC:$mask, _Src.RC:$src), @@ -7512,11 +7552,13 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, _.RC:$src0), vselect, "$src0 = $dst">, EVEX, EVEX_B, Sched<[sched.Folded]>; + } } // Coversion with SAE - suppress all exceptions multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86VectorVTInfo _Src, SDNode OpNodeSAE, X86FoldableSchedWrite sched> { + let Uses = [MXCSR] in defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _Src.RC:$src), OpcodeStr, "{sae}, $src", "$src, {sae}", @@ -7528,6 +7570,7 @@ multiclass avx512_vcvt_fp_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, multiclass avx512_vcvt_fp_rc<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, X86VectorVTInfo _Src, SDNode OpNodeRnd, X86FoldableSchedWrite sched> { + let Uses = [MXCSR] in defm rrb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _Src.RC:$src, AVX512RC:$rc), OpcodeStr, "$rc, $src", "$src, $rc", @@ -7551,14 +7594,14 @@ multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sched> { let Predicates = [HasAVX512] in { defm Z : avx512_vcvt_fpextend<opc, OpcodeStr, v8f64_info, v8f32x_info, - fpextend, 
sched.ZMM>, + any_fpextend, sched.ZMM>, avx512_vcvt_fp_sae<opc, OpcodeStr, v8f64_info, v8f32x_info, X86vfpextSAE, sched.ZMM>, EVEX_V512; } let Predicates = [HasVLX] in { defm Z128 : avx512_vcvt_fpextend<opc, OpcodeStr, v2f64x_info, v4f32x_info, - X86vfpext, sched.XMM, "{1to2}", "", f64mem>, EVEX_V128; - defm Z256 : avx512_vcvt_fpextend<opc, OpcodeStr, v4f64x_info, v4f32x_info, fpextend, + X86any_vfpext, sched.XMM, "{1to2}", "", f64mem>, EVEX_V128; + defm Z256 : avx512_vcvt_fpextend<opc, OpcodeStr, v4f64x_info, v4f32x_info, any_fpextend, sched.YMM>, EVEX_V256; } } @@ -7566,7 +7609,7 @@ multiclass avx512_cvtps2pd<bits<8> opc, string OpcodeStr, // Truncate Double to Float multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sched> { let Predicates = [HasAVX512] in { - defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, X86vfpround, sched.ZMM>, + defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8f32x_info, v8f64_info, X86any_vfpround, sched.ZMM>, avx512_vcvt_fp_rc<opc, OpcodeStr, v8f32x_info, v8f64_info, X86vfproundRnd, sched.ZMM>, EVEX_V512; } @@ -7574,7 +7617,7 @@ multiclass avx512_cvtpd2ps<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sc defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v2f64x_info, null_frag, sched.XMM, "{1to2}", "{x}", f128mem, VK2WM>, EVEX_V128; - defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, X86vfpround, + defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f32x_info, v4f64x_info, X86any_vfpround, sched.YMM, "{1to4}", "{y}">, EVEX_V256; } @@ -7624,70 +7667,10 @@ defm VCVTPD2PS : avx512_cvtpd2ps<0x5A, "vcvtpd2ps", SchedWriteCvtPD2PS>, defm VCVTPS2PD : avx512_cvtps2pd<0x5A, "vcvtps2pd", SchedWriteCvtPS2PD>, PS, EVEX_CD8<32, CD8VH>; -let Predicates = [HasAVX512] in { - def : Pat<(v8f32 (fpround (v8f64 VR512:$src))), - (VCVTPD2PSZrr VR512:$src)>; - def : Pat<(vselect VK8WM:$mask, (v8f32 (fpround (v8f64 VR512:$src))), - VR256X:$src0), - (VCVTPD2PSZrrk VR256X:$src0, VK8WM:$mask, VR512:$src)>; - 
def : Pat<(vselect VK8WM:$mask, (v8f32 (fpround (v8f64 VR512:$src))), - v8f32x_info.ImmAllZerosV), - (VCVTPD2PSZrrkz VK8WM:$mask, VR512:$src)>; - - def : Pat<(v8f32 (fpround (loadv8f64 addr:$src))), - (VCVTPD2PSZrm addr:$src)>; - def : Pat<(vselect VK8WM:$mask, (v8f32 (fpround (loadv8f64 addr:$src))), - VR256X:$src0), - (VCVTPD2PSZrmk VR256X:$src0, VK8WM:$mask, addr:$src)>; - def : Pat<(vselect VK8WM:$mask, (v8f32 (fpround (loadv8f64 addr:$src))), - v8f32x_info.ImmAllZerosV), - (VCVTPD2PSZrmkz VK8WM:$mask, addr:$src)>; - - def : Pat<(v8f32 (fpround (v8f64 (X86VBroadcastld64 addr:$src)))), - (VCVTPD2PSZrmb addr:$src)>; - def : Pat<(vselect VK8WM:$mask, - (fpround (v8f64 (X86VBroadcastld64 addr:$src))), - (v8f32 VR256X:$src0)), - (VCVTPD2PSZrmbk VR256X:$src0, VK8WM:$mask, addr:$src)>; - def : Pat<(vselect VK8WM:$mask, - (fpround (v8f64 (X86VBroadcastld64 addr:$src))), - v8f32x_info.ImmAllZerosV), - (VCVTPD2PSZrmbkz VK8WM:$mask, addr:$src)>; -} - let Predicates = [HasVLX] in { - def : Pat<(v4f32 (fpround (v4f64 VR256X:$src))), - (VCVTPD2PSZ256rr VR256X:$src)>; - def : Pat<(vselect VK4WM:$mask, (v4f32 (fpround (v4f64 VR256X:$src))), - VR128X:$src0), - (VCVTPD2PSZ256rrk VR128X:$src0, VK4WM:$mask, VR256X:$src)>; - def : Pat<(vselect VK4WM:$mask, (v4f32 (fpround (v4f64 VR256X:$src))), - v4f32x_info.ImmAllZerosV), - (VCVTPD2PSZ256rrkz VK4WM:$mask, VR256X:$src)>; - - def : Pat<(v4f32 (fpround (loadv4f64 addr:$src))), - (VCVTPD2PSZ256rm addr:$src)>; - def : Pat<(vselect VK4WM:$mask, (v4f32 (fpround (loadv4f64 addr:$src))), - VR128X:$src0), - (VCVTPD2PSZ256rmk VR128X:$src0, VK4WM:$mask, addr:$src)>; - def : Pat<(vselect VK4WM:$mask, (v4f32 (fpround (loadv4f64 addr:$src))), - v4f32x_info.ImmAllZerosV), - (VCVTPD2PSZ256rmkz VK4WM:$mask, addr:$src)>; - - def : Pat<(v4f32 (fpround (v4f64 (X86VBroadcastld64 addr:$src)))), - (VCVTPD2PSZ256rmb addr:$src)>; - def : Pat<(vselect VK4WM:$mask, - (v4f32 (fpround (v4f64 (X86VBroadcastld64 addr:$src)))), - VR128X:$src0), - 
(VCVTPD2PSZ256rmbk VR128X:$src0, VK4WM:$mask, addr:$src)>; - def : Pat<(vselect VK4WM:$mask, - (v4f32 (fpround (v4f64 (X86VBroadcastld64 addr:$src)))), - v4f32x_info.ImmAllZerosV), - (VCVTPD2PSZ256rmbkz VK4WM:$mask, addr:$src)>; - // Special patterns to allow use of X86vmfpround for masking. Instruction // patterns have been disabled with null_frag. - def : Pat<(X86vfpround (v2f64 VR128X:$src)), + def : Pat<(X86any_vfpround (v2f64 VR128X:$src)), (VCVTPD2PSZ128rr VR128X:$src)>; def : Pat<(X86vmfpround (v2f64 VR128X:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -7696,7 +7679,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTPD2PSZ128rrkz VK2WM:$mask, VR128X:$src)>; - def : Pat<(X86vfpround (loadv2f64 addr:$src)), + def : Pat<(X86any_vfpround (loadv2f64 addr:$src)), (VCVTPD2PSZ128rm addr:$src)>; def : Pat<(X86vmfpround (loadv2f64 addr:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -7705,7 +7688,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTPD2PSZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(X86vfpround (v2f64 (X86VBroadcastld64 addr:$src))), + def : Pat<(X86any_vfpround (v2f64 (X86VBroadcastld64 addr:$src))), (VCVTPD2PSZ128rmb addr:$src)>; def : Pat<(X86vmfpround (v2f64 (X86VBroadcastld64 addr:$src)), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -7716,6 +7699,7 @@ let Predicates = [HasVLX] in { } // Convert Signed/Unsigned Doubleword to Double +let Uses = []<Register>, mayRaiseFPException = 0 in multiclass avx512_cvtdq2pd<bits<8> opc, string OpcodeStr, SDNode OpNode, SDNode OpNode128, X86SchedWriteWidths sched> { // No rounding in this op @@ -8075,34 +8059,34 @@ multiclass avx512_cvtqq2ps<bits<8> opc, string OpcodeStr, SDNode OpNode, VK4WM:$mask, i64mem:$src), 0, "att">; } -defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", sint_to_fp, X86VSintToFP, +defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", any_sint_to_fp, X86any_VSintToFP, SchedWriteCvtDQ2PD>, XS, EVEX_CD8<32, CD8VH>; -defm VCVTDQ2PS : avx512_cvtdq2ps<0x5B, "vcvtdq2ps", sint_to_fp, +defm VCVTDQ2PS 
: avx512_cvtdq2ps<0x5B, "vcvtdq2ps", any_sint_to_fp, X86VSintToFpRnd, SchedWriteCvtDQ2PS>, PS, EVEX_CD8<32, CD8VF>; -defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", X86cvttp2si, +defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", X86any_cvttp2si, X86cvttp2siSAE, SchedWriteCvtPS2DQ>, XS, EVEX_CD8<32, CD8VF>; -defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", X86cvttp2si, +defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", X86any_cvttp2si, X86cvttp2siSAE, SchedWriteCvtPD2DQ>, PD, VEX_W, EVEX_CD8<64, CD8VF>; -defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", X86cvttp2ui, +defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", X86any_cvttp2ui, X86cvttp2uiSAE, SchedWriteCvtPS2DQ>, PS, EVEX_CD8<32, CD8VF>; -defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", X86cvttp2ui, +defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", X86any_cvttp2ui, X86cvttp2uiSAE, SchedWriteCvtPD2DQ>, PS, VEX_W, EVEX_CD8<64, CD8VF>; -defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", uint_to_fp, - X86VUintToFP, SchedWriteCvtDQ2PD>, XS, +defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", any_uint_to_fp, + X86any_VUintToFP, SchedWriteCvtDQ2PD>, XS, EVEX_CD8<32, CD8VH>; -defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", uint_to_fp, +defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", any_uint_to_fp, X86VUintToFpRnd, SchedWriteCvtDQ2PS>, XD, EVEX_CD8<32, CD8VF>; @@ -8138,35 +8122,35 @@ defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt, X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, PD, EVEX_CD8<32, CD8VH>; -defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", X86cvttp2si, +defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", X86any_cvttp2si, X86cvttp2siSAE, SchedWriteCvtPD2DQ>, VEX_W, PD, EVEX_CD8<64, CD8VF>; -defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", X86cvttp2si, +defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", X86any_cvttp2si, X86cvttp2siSAE, SchedWriteCvtPS2DQ>, PD, EVEX_CD8<32, CD8VH>; -defm VCVTTPD2UQQ : 
avx512_cvttpd2qq<0x78, "vcvttpd2uqq", X86cvttp2ui, +defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", X86any_cvttp2ui, X86cvttp2uiSAE, SchedWriteCvtPD2DQ>, VEX_W, PD, EVEX_CD8<64, CD8VF>; -defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", X86cvttp2ui, +defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", X86any_cvttp2ui, X86cvttp2uiSAE, SchedWriteCvtPS2DQ>, PD, EVEX_CD8<32, CD8VH>; -defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", sint_to_fp, +defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", any_sint_to_fp, X86VSintToFpRnd, SchedWriteCvtDQ2PD>, VEX_W, XS, EVEX_CD8<64, CD8VF>; -defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", uint_to_fp, +defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", any_uint_to_fp, X86VUintToFpRnd, SchedWriteCvtDQ2PD>, VEX_W, XS, EVEX_CD8<64, CD8VF>; -defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", sint_to_fp, +defm VCVTQQ2PS : avx512_cvtqq2ps<0x5B, "vcvtqq2ps", any_sint_to_fp, X86VSintToFpRnd, SchedWriteCvtDQ2PS>, VEX_W, PS, EVEX_CD8<64, CD8VF>; -defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", uint_to_fp, +defm VCVTUQQ2PS : avx512_cvtqq2ps<0x7A, "vcvtuqq2ps", any_uint_to_fp, X86VUintToFpRnd, SchedWriteCvtDQ2PS>, VEX_W, XD, EVEX_CD8<64, CD8VF>; @@ -8202,7 +8186,7 @@ let Predicates = [HasVLX] in { // Special patterns to allow use of X86mcvttp2si for masking. Instruction // patterns have been disabled with null_frag. 
- def : Pat<(v4i32 (X86cvttp2si (v2f64 VR128X:$src))), + def : Pat<(v4i32 (X86any_cvttp2si (v2f64 VR128X:$src))), (VCVTTPD2DQZ128rr VR128X:$src)>; def : Pat<(X86mcvttp2si (v2f64 VR128X:$src), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8211,7 +8195,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTTPD2DQZ128rrkz VK2WM:$mask, VR128X:$src)>; - def : Pat<(v4i32 (X86cvttp2si (loadv2f64 addr:$src))), + def : Pat<(v4i32 (X86any_cvttp2si (loadv2f64 addr:$src))), (VCVTTPD2DQZ128rm addr:$src)>; def : Pat<(X86mcvttp2si (loadv2f64 addr:$src), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8220,7 +8204,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTTPD2DQZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v4i32 (X86cvttp2si (v2f64 (X86VBroadcastld64 addr:$src)))), + def : Pat<(v4i32 (X86any_cvttp2si (v2f64 (X86VBroadcastld64 addr:$src)))), (VCVTTPD2DQZ128rmb addr:$src)>; def : Pat<(X86mcvttp2si (v2f64 (X86VBroadcastld64 addr:$src)), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8260,7 +8244,7 @@ let Predicates = [HasVLX] in { // Special patterns to allow use of X86mcvtp2UInt for masking. Instruction // patterns have been disabled with null_frag. 
- def : Pat<(v4i32 (X86cvttp2ui (v2f64 VR128X:$src))), + def : Pat<(v4i32 (X86any_cvttp2ui (v2f64 VR128X:$src))), (VCVTTPD2UDQZ128rr VR128X:$src)>; def : Pat<(X86mcvttp2ui (v2f64 VR128X:$src), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8269,7 +8253,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTTPD2UDQZ128rrkz VK2WM:$mask, VR128X:$src)>; - def : Pat<(v4i32 (X86cvttp2ui (loadv2f64 addr:$src))), + def : Pat<(v4i32 (X86any_cvttp2ui (loadv2f64 addr:$src))), (VCVTTPD2UDQZ128rm addr:$src)>; def : Pat<(X86mcvttp2ui (loadv2f64 addr:$src), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8278,7 +8262,7 @@ let Predicates = [HasVLX] in { VK2WM:$mask), (VCVTTPD2UDQZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v4i32 (X86cvttp2ui (v2f64 (X86VBroadcastld64 addr:$src)))), + def : Pat<(v4i32 (X86any_cvttp2ui (v2f64 (X86VBroadcastld64 addr:$src)))), (VCVTTPD2UDQZ128rmb addr:$src)>; def : Pat<(X86mcvttp2ui (v2f64 (X86VBroadcastld64 addr:$src)), (v4i32 VR128X:$src0), VK2WM:$mask), @@ -8311,7 +8295,7 @@ let Predicates = [HasDQI, HasVLX] in { v2i64x_info.ImmAllZerosV)), (VCVTPS2UQQZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), + def : Pat<(v2i64 (X86any_cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTTPS2QQZ128rm addr:$src)>; def : Pat<(v2i64 (vselect VK2WM:$mask, (X86cvttp2si (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), @@ -8322,7 +8306,7 @@ let Predicates = [HasDQI, HasVLX] in { v2i64x_info.ImmAllZerosV)), (VCVTTPS2QQZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v2i64 (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), + def : Pat<(v2i64 (X86any_cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src))))), (VCVTTPS2UQQZ128rm addr:$src)>; def : Pat<(v2i64 (vselect VK2WM:$mask, (X86cvttp2ui (bc_v4f32 (v2f64 (X86vzload64 addr:$src)))), @@ -8334,63 +8318,26 @@ let Predicates = [HasDQI, HasVLX] in { (VCVTTPS2UQQZ128rmkz VK2WM:$mask, addr:$src)>; } -let Predicates = [HasAVX512, NoVLX] in { -def : Pat<(v8i32 
(X86cvttp2ui (v8f32 VR256X:$src1))), - (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr - (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; - -def : Pat<(v4i32 (X86cvttp2ui (v4f32 VR128X:$src1))), - (EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr - (v16f32 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4i32 (X86cvttp2ui (v4f64 VR256X:$src1))), - (EXTRACT_SUBREG (v8i32 (VCVTTPD2UDQZrr - (v8f64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_xmm)>; - -def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))), - (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr - (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; - -def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))), - (EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr - (v16i32 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr - (v8i32 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_ymm)>; - -def : Pat<(v2f64 (X86VUintToFP (v4i32 VR128X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr - (v8i32 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; -} - let Predicates = [HasVLX] in { - def : Pat<(v2f64 (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))), + def : Pat<(v2f64 (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))), (VCVTDQ2PDZ128rm addr:$src)>; def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), VR128X:$src0)), (VCVTDQ2PDZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + (X86any_VSintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), v2f64x_info.ImmAllZerosV)), (VCVTDQ2PDZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v2f64 (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 
addr:$src))))), + def : Pat<(v2f64 (X86any_VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src))))), (VCVTUDQ2PDZ128rm addr:$src)>; def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + (X86any_VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), VR128X:$src0)), (VCVTUDQ2PDZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; def : Pat<(v2f64 (vselect VK2WM:$mask, - (X86VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), + (X86any_VUintToFP (bc_v4i32 (v2i64 (X86vzload64 addr:$src)))), v2f64x_info.ImmAllZerosV)), (VCVTUDQ2PDZ128rmkz VK2WM:$mask, addr:$src)>; } @@ -8398,7 +8345,7 @@ let Predicates = [HasVLX] in { let Predicates = [HasDQI, HasVLX] in { // Special patterns to allow use of X86VMSintToFP for masking. Instruction // patterns have been disabled with null_frag. - def : Pat<(v4f32 (X86VSintToFP (v2i64 VR128X:$src))), + def : Pat<(v4f32 (X86any_VSintToFP (v2i64 VR128X:$src))), (VCVTQQ2PSZ128rr VR128X:$src)>; def : Pat<(X86VMSintToFP (v2i64 VR128X:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8407,7 +8354,7 @@ let Predicates = [HasDQI, HasVLX] in { VK2WM:$mask), (VCVTQQ2PSZ128rrkz VK2WM:$mask, VR128X:$src)>; - def : Pat<(v4f32 (X86VSintToFP (loadv2i64 addr:$src))), + def : Pat<(v4f32 (X86any_VSintToFP (loadv2i64 addr:$src))), (VCVTQQ2PSZ128rm addr:$src)>; def : Pat<(X86VMSintToFP (loadv2i64 addr:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8416,7 +8363,7 @@ let Predicates = [HasDQI, HasVLX] in { VK2WM:$mask), (VCVTQQ2PSZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v4f32 (X86VSintToFP (v2i64 (X86VBroadcastld64 addr:$src)))), + def : Pat<(v4f32 (X86any_VSintToFP (v2i64 (X86VBroadcastld64 addr:$src)))), (VCVTQQ2PSZ128rmb addr:$src)>; def : Pat<(X86VMSintToFP (v2i64 (X86VBroadcastld64 addr:$src)), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8427,7 +8374,7 @@ let Predicates = [HasDQI, HasVLX] in { // Special patterns to allow use of X86VMUintToFP for masking. Instruction // patterns have been disabled with null_frag. 
- def : Pat<(v4f32 (X86VUintToFP (v2i64 VR128X:$src))), + def : Pat<(v4f32 (X86any_VUintToFP (v2i64 VR128X:$src))), (VCVTUQQ2PSZ128rr VR128X:$src)>; def : Pat<(X86VMUintToFP (v2i64 VR128X:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8436,7 +8383,7 @@ let Predicates = [HasDQI, HasVLX] in { VK2WM:$mask), (VCVTUQQ2PSZ128rrkz VK2WM:$mask, VR128X:$src)>; - def : Pat<(v4f32 (X86VUintToFP (loadv2i64 addr:$src))), + def : Pat<(v4f32 (X86any_VUintToFP (loadv2i64 addr:$src))), (VCVTUQQ2PSZ128rm addr:$src)>; def : Pat<(X86VMUintToFP (loadv2i64 addr:$src), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8445,7 +8392,7 @@ let Predicates = [HasDQI, HasVLX] in { VK2WM:$mask), (VCVTUQQ2PSZ128rmkz VK2WM:$mask, addr:$src)>; - def : Pat<(v4f32 (X86VUintToFP (v2i64 (X86VBroadcastld64 addr:$src)))), + def : Pat<(v4f32 (X86any_VUintToFP (v2i64 (X86VBroadcastld64 addr:$src)))), (VCVTUQQ2PSZ128rmb addr:$src)>; def : Pat<(X86VMUintToFP (v2i64 (X86VBroadcastld64 addr:$src)), (v4f32 VR128X:$src0), VK2WM:$mask), @@ -8455,72 +8402,11 @@ let Predicates = [HasDQI, HasVLX] in { (VCVTUQQ2PSZ128rmbkz VK2WM:$mask, addr:$src)>; } -let Predicates = [HasDQI, NoVLX] in { -def : Pat<(v2i64 (X86cvttp2si (v2f64 VR128X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr - (v8f64 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4i64 (X86cvttp2si (v4f32 VR128X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPS2QQZrr - (v8f32 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_ymm)>; - -def : Pat<(v4i64 (X86cvttp2si (v4f64 VR256X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPD2QQZrr - (v8f64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; - -def : Pat<(v2i64 (X86cvttp2ui (v2f64 VR128X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr - (v8f64 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4i64 (X86cvttp2ui (v4f32 VR128X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPS2UQQZrr - (v8f32 (INSERT_SUBREG (IMPLICIT_DEF), - 
VR128X:$src1, sub_xmm)))), sub_ymm)>; - -def : Pat<(v4i64 (X86cvttp2ui (v4f64 VR256X:$src1))), - (EXTRACT_SUBREG (v8i64 (VCVTTPD2UQQZrr - (v8f64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; - -def : Pat<(v4f32 (sint_to_fp (v4i64 VR256X:$src1))), - (EXTRACT_SUBREG (v8f32 (VCVTQQ2PSZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_xmm)>; - -def : Pat<(v2f64 (sint_to_fp (v2i64 VR128X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4f64 (sint_to_fp (v4i64 VR256X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTQQ2PDZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; - -def : Pat<(v4f32 (uint_to_fp (v4i64 VR256X:$src1))), - (EXTRACT_SUBREG (v8f32 (VCVTUQQ2PSZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_xmm)>; - -def : Pat<(v2f64 (uint_to_fp (v2i64 VR128X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR128X:$src1, sub_xmm)))), sub_xmm)>; - -def : Pat<(v4f64 (uint_to_fp (v4i64 VR256X:$src1))), - (EXTRACT_SUBREG (v8f64 (VCVTUQQ2PDZrr - (v8i64 (INSERT_SUBREG (IMPLICIT_DEF), - VR256X:$src1, sub_ymm)))), sub_ymm)>; -} - //===----------------------------------------------------------------------===// // Half precision conversion instructions //===----------------------------------------------------------------------===// +let Uses = [MXCSR], mayRaiseFPException = 1 in multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src, X86MemOperand x86memop, PatFrag ld_frag, X86FoldableSchedWrite sched> { @@ -8537,6 +8423,7 @@ multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src, multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src, X86FoldableSchedWrite sched> { + let Uses = [MXCSR] in defm rrb : AVX512_maskable<0x13, MRMSrcReg, _dest, (outs _dest.RC:$dst), (ins _src.RC:$src), 
"vcvtph2ps", "{sae}, $src", "$src, {sae}", @@ -8568,7 +8455,7 @@ let Predicates = [HasVLX] in { multiclass avx512_cvtps2ph<X86VectorVTInfo _dest, X86VectorVTInfo _src, X86MemOperand x86memop, SchedWrite RR, SchedWrite MR> { -let ExeDomain = GenericDomain in { +let ExeDomain = GenericDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { def rr : AVX512AIi8<0x1D, MRMDestReg, (outs _dest.RC:$dst), (ins _src.RC:$src1, i32u8imm:$src2), "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", @@ -8605,7 +8492,7 @@ let ExeDomain = GenericDomain in { multiclass avx512_cvtps2ph_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src, SchedWrite Sched> { - let hasSideEffects = 0 in + let hasSideEffects = 0, Uses = [MXCSR] in defm rrb : AVX512_maskable_in_asm<0x1D, MRMDestReg, _dest, (outs _dest.RC:$dst), (ins _src.RC:$src1, i32u8imm:$src2), @@ -8664,52 +8551,51 @@ let Predicates = [HasVLX] in { // Unordered/Ordered scalar fp compare with Sae and set EFLAGS multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _, - string OpcodeStr, X86FoldableSchedWrite sched> { - let hasSideEffects = 0 in + string OpcodeStr, Domain d, + X86FoldableSchedWrite sched = WriteFCom> { + let hasSideEffects = 0, Uses = [MXCSR] in def rrb: AVX512<opc, MRMSrcReg, (outs), (ins _.RC:$src1, _.RC:$src2), !strconcat(OpcodeStr, "\t{{sae}, $src2, $src1|$src1, $src2, {sae}}"), []>, EVEX, EVEX_B, VEX_LIG, EVEX_V128, Sched<[sched]>; } let Defs = [EFLAGS], Predicates = [HasAVX512] in { - defm VUCOMISSZ : avx512_ord_cmp_sae<0x2E, v4f32x_info, "vucomiss", WriteFCom>, + defm VUCOMISSZ : avx512_ord_cmp_sae<0x2E, v4f32x_info, "vucomiss", SSEPackedSingle>, AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>; - defm VUCOMISDZ : avx512_ord_cmp_sae<0x2E, v2f64x_info, "vucomisd", WriteFCom>, + defm VUCOMISDZ : avx512_ord_cmp_sae<0x2E, v2f64x_info, "vucomisd", SSEPackedDouble>, AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>; - defm VCOMISSZ : avx512_ord_cmp_sae<0x2F, v4f32x_info, "vcomiss", WriteFCom>, + defm VCOMISSZ : avx512_ord_cmp_sae<0x2F, 
v4f32x_info, "vcomiss", SSEPackedSingle>, AVX512PSIi8Base, EVEX_CD8<32, CD8VT1>; - defm VCOMISDZ : avx512_ord_cmp_sae<0x2F, v2f64x_info, "vcomisd", WriteFCom>, + defm VCOMISDZ : avx512_ord_cmp_sae<0x2F, v2f64x_info, "vcomisd", SSEPackedDouble>, AVX512PDIi8Base, VEX_W, EVEX_CD8<64, CD8VT1>; } let Defs = [EFLAGS], Predicates = [HasAVX512] in { - defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32, - "ucomiss", WriteFCom>, PS, EVEX, VEX_LIG, + defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86any_fcmp, f32, f32mem, loadf32, + "ucomiss", SSEPackedSingle>, PS, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; - defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64, - "ucomisd", WriteFCom>, PD, EVEX, + defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86any_fcmp, f64, f64mem, loadf64, + "ucomisd", SSEPackedDouble>, PD, EVEX, + VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; + defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, X86strict_fcmps, f32, f32mem, loadf32, + "comiss", SSEPackedSingle>, PS, EVEX, VEX_LIG, + EVEX_CD8<32, CD8VT1>; + defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, X86strict_fcmps, f64, f64mem, loadf64, + "comisd", SSEPackedDouble>, PD, EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; - let Pattern = []<dag> in { - defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, undef, f32, f32mem, loadf32, - "comiss", WriteFCom>, PS, EVEX, VEX_LIG, - EVEX_CD8<32, CD8VT1>; - defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, undef, f64, f64mem, loadf64, - "comisd", WriteFCom>, PD, EVEX, - VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; - } let isCodeGenOnly = 1 in { defm VUCOMISSZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v4f32, ssmem, - sse_load_f32, "ucomiss", WriteFCom>, PS, EVEX, VEX_LIG, + sse_load_f32, "ucomiss", SSEPackedSingle>, PS, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; defm VUCOMISDZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v2f64, sdmem, - sse_load_f64, "ucomisd", WriteFCom>, PD, EVEX, + sse_load_f64, "ucomisd", SSEPackedDouble>, PD, EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; defm 
VCOMISSZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v4f32, ssmem, - sse_load_f32, "comiss", WriteFCom>, PS, EVEX, VEX_LIG, + sse_load_f32, "comiss", SSEPackedSingle>, PS, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>; defm VCOMISDZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v2f64, sdmem, - sse_load_f64, "comisd", WriteFCom>, PD, EVEX, + sse_load_f64, "comisd", SSEPackedDouble>, PD, EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>; } } @@ -8717,7 +8603,7 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in { /// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let Predicates = [HasAVX512], ExeDomain = _.ExeDomain in { + let Predicates = [HasAVX512], ExeDomain = _.ExeDomain, Uses = [MXCSR] in { defm rr : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", @@ -8767,6 +8653,7 @@ multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode, } } +let Uses = [MXCSR] in multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode, X86SchedWriteWidths sched> { defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, sched.ZMM, @@ -8798,12 +8685,12 @@ defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86rcp14, SchedWriteFRcp>; multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _, SDNode OpNode, SDNode OpNodeSAE, X86FoldableSchedWrite sched> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in { defm r : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, - Sched<[sched]>; + Sched<[sched]>, SIMD_EXC; defm rb : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2), OpcodeStr, @@ -8815,7 +8702,7 @@ multiclass avx512_fp28_s<bits<8> opc, string 
OpcodeStr,X86VectorVTInfo _, (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", (OpNode (_.VT _.RC:$src1), _.ScalarIntMemCPat:$src2)>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } } @@ -8840,7 +8727,7 @@ defm VGETEXP : avx512_eri_s<0x43, "vgetexp", X86fgetexps, X86fgetexpSAEs, multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, SDNode OpNode, X86FoldableSchedWrite sched> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src), OpcodeStr, "$src", "$src", (OpNode (_.VT _.RC:$src))>, @@ -8862,7 +8749,7 @@ multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, } multiclass avx512_fp28_p_sae<bits<8> opc, string OpcodeStr, X86VectorVTInfo _, SDNode OpNode, X86FoldableSchedWrite sched> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src), OpcodeStr, "{sae}, $src", "$src, {sae}", @@ -8923,25 +8810,26 @@ multiclass avx512_sqrt_packed_round<bits<8> opc, string OpcodeStr, multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _>{ - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src), OpcodeStr, "$src", "$src", - (_.VT (fsqrt _.RC:$src))>, EVEX, + (_.VT (any_fsqrt _.RC:$src))>, EVEX, Sched<[sched]>; defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.MemOp:$src), OpcodeStr, "$src", "$src", - (fsqrt (_.VT + (any_fsqrt (_.VT (bitconvert (_.LdFrag addr:$src))))>, EVEX, Sched<[sched.Folded, sched.ReadAfterFold]>; defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.ScalarMemOp:$src), OpcodeStr, 
"${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr, - (fsqrt (_.VT + (any_fsqrt (_.VT (_.BroadcastLdFrag addr:$src)))>, EVEX, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } } +let Uses = [MXCSR], mayRaiseFPException = 1 in multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr, X86SchedWriteSizes sched> { defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), @@ -8967,6 +8855,7 @@ multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr, } } +let Uses = [MXCSR] in multiclass avx512_sqrt_packed_all_round<bits<8> opc, string OpcodeStr, X86SchedWriteSizes sched> { defm PSZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ps"), @@ -8985,13 +8874,14 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr, X86FoldableSchedWri "$src2, $src1", "$src1, $src2", (X86fsqrts (_.VT _.RC:$src1), (_.VT _.RC:$src2))>, - Sched<[sched]>; + Sched<[sched]>, SIMD_EXC; defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src1, _.IntScalarMemOp:$src2), OpcodeStr, "$src2, $src1", "$src1, $src2", (X86fsqrts (_.VT _.RC:$src1), _.ScalarIntMemCPat:$src2)>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; + let Uses = [MXCSR] in defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, AVX512RC:$rc), OpcodeStr, "$rc, $src2, $src1", "$src1, $src2, $rc", @@ -9004,23 +8894,23 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr, X86FoldableSchedWri def r : I<opc, MRMSrcReg, (outs _.FRC:$dst), (ins _.FRC:$src1, _.FRC:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, - Sched<[sched]>; + Sched<[sched]>, SIMD_EXC; let mayLoad = 1 in def m : I<opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2), OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } } let Predicates = 
[HasAVX512] in { - def : Pat<(_.EltVT (fsqrt _.FRC:$src)), + def : Pat<(_.EltVT (any_fsqrt _.FRC:$src)), (!cast<Instruction>(Name#Zr) (_.EltVT (IMPLICIT_DEF)), _.FRC:$src)>; } let Predicates = [HasAVX512, OptForSize] in { - def : Pat<(_.EltVT (fsqrt (load addr:$src))), + def : Pat<(_.EltVT (any_fsqrt (load addr:$src))), (!cast<Instruction>(Name#Zm) (_.EltVT (IMPLICIT_DEF)), addr:$src)>; } @@ -9047,8 +8937,9 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2), (i32 timm:$src3)))>, - Sched<[sched]>; + Sched<[sched]>, SIMD_EXC; + let Uses = [MXCSR] in defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr, "$src3, {sae}, $src2, $src1", "$src1, $src2, {sae}, $src3", @@ -9062,30 +8953,30 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", (_.VT (X86RndScales _.RC:$src1, _.ScalarIntMemCPat:$src2, (i32 timm:$src3)))>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; let isCodeGenOnly = 1, hasSideEffects = 0, Predicates = [HasAVX512] in { def r : I<opc, MRMSrcReg, (outs _.FRC:$dst), (ins _.FRC:$src1, _.FRC:$src2, i32u8imm:$src3), OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, Sched<[sched]>; + []>, Sched<[sched]>, SIMD_EXC; let mayLoad = 1 in def m : I<opc, MRMSrcMem, (outs _.FRC:$dst), (ins _.FRC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3), OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, Sched<[sched.Folded, sched.ReadAfterFold]>; + []>, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } } let Predicates = [HasAVX512] in { - def : Pat<(X86VRndScale _.FRC:$src1, timm:$src2), + def : Pat<(X86any_VRndScale _.FRC:$src1, timm:$src2), (_.EltVT (!cast<Instruction>(NAME##r) (_.EltVT (IMPLICIT_DEF)), _.FRC:$src1, 
timm:$src2))>; } let Predicates = [HasAVX512, OptForSize] in { - def : Pat<(X86VRndScale (_.ScalarLdFrag addr:$src1), timm:$src2), + def : Pat<(X86any_VRndScale (_.ScalarLdFrag addr:$src1), timm:$src2), (_.EltVT (!cast<Instruction>(NAME##m) (_.EltVT (IMPLICIT_DEF)), addr:$src1, timm:$src2))>; } @@ -9681,7 +9572,7 @@ defm : AVX512_pmovx_patterns<"VPMOVSX", sext, sext_invec>; defm : AVX512_pmovx_patterns<"VPMOVZX", zext, zext_invec>; // Without BWI we can't do a trunc from v16i16 to v16i8. DAG combine can merge -// ext+trunc aggresively making it impossible to legalize the DAG to this +// ext+trunc aggressively making it impossible to legalize the DAG to this // pattern directly. let Predicates = [HasAVX512, NoBWI] in { def: Pat<(v16i8 (trunc (v16i16 VR256X:$src))), @@ -10101,7 +9992,7 @@ defm VEXPANDPD : expand_by_elt_width <0x88, "vexpandpd", WriteVarShuffle256, //all instruction created with FROUND_CURRENT multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, i32u8imm:$src2), OpcodeStr##_.Suffix, "$src2, $src1", "$src1, $src2", @@ -10127,7 +10018,7 @@ multiclass avx512_unary_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNo multiclass avx512_unary_fp_sae_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, i32u8imm:$src2), OpcodeStr##_.Suffix, "$src2, {sae}, $src1", @@ -10160,7 +10051,7 @@ multiclass avx512_common_unary_fp_sae_packed_imm<string OpcodeStr, //all instruction created with FROUND_CURRENT multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, 
X86FoldableSchedWrite sched, X86VectorVTInfo _>{ - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -10232,7 +10123,7 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode, // op(reg_vec2,mem_scalar,imm) multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in { + let ExeDomain = _.ExeDomain, Uses = [MXCSR], mayRaiseFPException = 1 in { defm rri : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3", @@ -10254,7 +10145,7 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass avx512_fp_sae_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm rrib : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr, "$src3, {sae}, $src2, $src1", @@ -10268,7 +10159,7 @@ multiclass avx512_fp_sae_packed_imm<bits<8> opc, string OpcodeStr, //handle scalar instruction reg_vec1 = op(reg_vec2,reg_vec3,imm),{sae} multiclass avx512_fp_sae_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo _> { - let ExeDomain = _.ExeDomain in + let ExeDomain = _.ExeDomain, Uses = [MXCSR] in defm NAME#rrib : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr, "$src3, {sae}, $src2, $src1", @@ -10350,7 +10241,7 @@ defm VREDUCE : avx512_common_unary_fp_sae_packed_imm_all<"vreduce", 0x56, 0x56 X86VReduce, 
X86VReduceSAE, SchedWriteFRnd, HasDQI>, AVX512AIi8Base, EVEX; defm VRNDSCALE : avx512_common_unary_fp_sae_packed_imm_all<"vrndscale", 0x08, 0x09, - X86VRndScale, X86VRndScaleSAE, SchedWriteFRnd, HasAVX512>, + X86any_VRndScale, X86VRndScaleSAE, SchedWriteFRnd, HasAVX512>, AVX512AIi8Base, EVEX; defm VGETMANT : avx512_common_unary_fp_sae_packed_imm_all<"vgetmant", 0x26, 0x26, X86VGetMant, X86VGetMantSAE, SchedWriteFRnd, HasAVX512>, @@ -10892,10 +10783,12 @@ def : Pat<(vselect (v2i1 VK2WM:$mask), (v2f64 (X86VBroadcast (v2f64 (simple_load // AVX-512 - Unpack Instructions //===----------------------------------------------------------------------===// +let Uses = []<Register>, mayRaiseFPException = 0 in { defm VUNPCKH : avx512_fp_binop_p<0x15, "vunpckh", X86Unpckh, HasAVX512, SchedWriteFShuffleSizes, 0, 1>; defm VUNPCKL : avx512_fp_binop_p<0x14, "vunpckl", X86Unpckl, HasAVX512, SchedWriteFShuffleSizes>; +} defm VPUNPCKLBW : avx512_binop_rm_vl_b<0x60, "vpunpcklbw", X86Unpckl, SchedWriteShuffle, HasBWI>; @@ -11587,7 +11480,8 @@ let Predicates = [HasVLX] in { multiclass avx512_fixupimm_packed<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _, X86VectorVTInfo TblVT>{ - let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in { + let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, + Uses = [MXCSR], mayRaiseFPException = 1 in { defm rri : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4), OpcodeStr##_.Suffix, "$src4, $src3, $src2", "$src2, $src3, $src4", @@ -11619,7 +11513,7 @@ multiclass avx512_fixupimm_packed_sae<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched, X86VectorVTInfo _, X86VectorVTInfo TblVT> : avx512_fixupimm_packed<opc, OpcodeStr, sched, _, TblVT> { -let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in { +let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain, Uses = [MXCSR] in { defm rrib : AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), 
(ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4), OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2", @@ -11643,7 +11537,8 @@ multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, (X86VFixupimms (_.VT _.RC:$src1), (_.VT _.RC:$src2), (_src3VT.VT _src3VT.RC:$src3), - (i32 timm:$src4))>, Sched<[sched]>; + (i32 timm:$src4))>, Sched<[sched]>, SIMD_EXC; + let Uses = [MXCSR] in defm rrib : AVX512_maskable_3src_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3, i32u8imm:$src4), OpcodeStr##_.Suffix, "$src4, {sae}, $src3, $src2", @@ -11661,7 +11556,7 @@ multiclass avx512_fixupimm_scalar<bits<8> opc, string OpcodeStr, (_src3VT.VT (scalar_to_vector (_src3VT.ScalarLdFrag addr:$src3))), (i32 timm:$src4))>, - Sched<[sched.Folded, sched.ReadAfterFold]>; + Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; } } @@ -11978,6 +11873,7 @@ let Constraints = "$src1 = $dst" in multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode, X86FoldableSchedWrite sched, X86VectorVTInfo VTI, bit IsCommutable> { + let ExeDomain = VTI.ExeDomain in { defm r : AVX512_maskable_3src<Op, MRMSrcReg, VTI, (outs VTI.RC:$dst), (ins VTI.RC:$src2, VTI.RC:$src3), OpStr, "$src3, $src2", "$src2, $src3", @@ -12000,6 +11896,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode, (VTI.VT (VTI.BroadcastLdFrag addr:$src3)))>, EVEX_4V, EVEX_CD8<32, CD8VF>, EVEX_B, T8PD, Sched<[sched.Folded, sched.ReadAfterFold]>; + } } multiclass VNNI_common<bits<8> Op, string OpStr, SDNode OpNode, @@ -12164,7 +12061,7 @@ defm VGF2P8AFFINEQB : GF2P8AFFINE_avx512_common<0xCE, "vgf2p8affineqb", //===----------------------------------------------------------------------===// let hasSideEffects = 0, mayLoad = 1, ExeDomain = SSEPackedSingle, - Constraints = "$src1 = $dst" in { + Constraints = "$src1 = $dst", Uses = [MXCSR], mayRaiseFPException = 1 in { defm V4FMADDPSrm : AVX512_maskable_3src_in_asm<0x9A, MRMSrcMem, v16f32_info, (outs VR512:$dst), (ins VR512:$src2, f128mem:$src3), "v4fmaddps", 
"$src3, $src2", "$src2, $src3", @@ -12210,9 +12107,9 @@ defm VP4DPWSSDSrm : AVX512_maskable_3src_in_asm<0x53, MRMSrcMem, v16i32_info, } let hasSideEffects = 0 in { - let mayStore = 1 in + let mayStore = 1, SchedRW = [WriteFStoreX] in def MASKPAIR16STORE : PseudoI<(outs), (ins anymem:$dst, VK16PAIR:$src), []>; - let mayLoad = 1 in + let mayLoad = 1, SchedRW = [WriteFLoadX] in def MASKPAIR16LOAD : PseudoI<(outs VK16PAIR:$dst), (ins anymem:$src), []>; } @@ -12220,7 +12117,7 @@ let hasSideEffects = 0 in { // VP2INTERSECT //===----------------------------------------------------------------------===// -multiclass avx512_vp2intersect_modes<X86VectorVTInfo _> { +multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInfo _> { def rr : I<0x68, MRMSrcReg, (outs _.KRPC:$dst), (ins _.RC:$src1, _.RC:$src2), @@ -12228,7 +12125,7 @@ multiclass avx512_vp2intersect_modes<X86VectorVTInfo _> { "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set _.KRPC:$dst, (X86vp2intersect _.RC:$src1, (_.VT _.RC:$src2)))]>, - EVEX_4V, T8XD; + EVEX_4V, T8XD, Sched<[sched]>; def rm : I<0x68, MRMSrcMem, (outs _.KRPC:$dst), @@ -12237,7 +12134,8 @@ multiclass avx512_vp2intersect_modes<X86VectorVTInfo _> { "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set _.KRPC:$dst, (X86vp2intersect _.RC:$src1, (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>, - EVEX_4V, T8XD, EVEX_CD8<_.EltSize, CD8VF>; + EVEX_4V, T8XD, EVEX_CD8<_.EltSize, CD8VF>, + Sched<[sched.Folded, sched.ReadAfterFold]>; def rmb : I<0x68, MRMSrcMem, (outs _.KRPC:$dst), @@ -12246,21 +12144,22 @@ multiclass avx512_vp2intersect_modes<X86VectorVTInfo _> { ", $src1, $dst|$dst, $src1, ${src2}", _.BroadcastStr ,"}"), [(set _.KRPC:$dst, (X86vp2intersect _.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2))))]>, - EVEX_4V, T8XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>; + EVEX_4V, T8XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>, + Sched<[sched.Folded, sched.ReadAfterFold]>; } -multiclass avx512_vp2intersect<AVX512VLVectorVTInfo _> { +multiclass 
avx512_vp2intersect<X86SchedWriteWidths sched, AVX512VLVectorVTInfo _> { let Predicates = [HasAVX512, HasVP2INTERSECT] in - defm Z : avx512_vp2intersect_modes<_.info512>, EVEX_V512; + defm Z : avx512_vp2intersect_modes<sched.ZMM, _.info512>, EVEX_V512; let Predicates = [HasAVX512, HasVP2INTERSECT, HasVLX] in { - defm Z256 : avx512_vp2intersect_modes<_.info256>, EVEX_V256; - defm Z128 : avx512_vp2intersect_modes<_.info128>, EVEX_V128; + defm Z256 : avx512_vp2intersect_modes<sched.YMM, _.info256>, EVEX_V256; + defm Z128 : avx512_vp2intersect_modes<sched.XMM, _.info128>, EVEX_V128; } } -defm VP2INTERSECTD : avx512_vp2intersect<avx512vl_i32_info>; -defm VP2INTERSECTQ : avx512_vp2intersect<avx512vl_i64_info>, VEX_W; +defm VP2INTERSECTD : avx512_vp2intersect<SchedWriteVecALU, avx512vl_i32_info>; +defm VP2INTERSECTQ : avx512_vp2intersect<SchedWriteVecALU, avx512vl_i64_info>, VEX_W; multiclass avx512_binop_all2<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sched, @@ -12293,17 +12192,19 @@ defm VCVTNE2PS2BF16 : avx512_binop_all2<0x72, "vcvtne2ps2bf16", // Truncate Float to BFloat16 multiclass avx512_cvtps2bf16<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sched> { - let Predicates = [HasBF16] in { + let Predicates = [HasBF16], Uses = []<Register>, mayRaiseFPException = 0 in { defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i16x_info, v16f32_info, X86cvtneps2bf16, sched.ZMM>, EVEX_V512; } let Predicates = [HasBF16, HasVLX] in { + let Uses = []<Register>, mayRaiseFPException = 0 in { defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v8i16x_info, v4f32x_info, null_frag, sched.XMM, "{1to4}", "{x}", f128mem, VK4WM>, EVEX_V128; defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i16x_info, v8f32x_info, X86cvtneps2bf16, sched.YMM, "{1to8}", "{y}">, EVEX_V256; + } def : InstAlias<OpcodeStr##"x\t{$src, $dst|$dst, $src}", (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, @@ -12358,19 +12259,21 @@ let Predicates = [HasBF16, HasVLX] in { let Constraints = "$src1 = $dst" in { multiclass 
avx512_dpbf16ps_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, + X86FoldableSchedWrite sched, X86VectorVTInfo _, X86VectorVTInfo src_v> { defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst), (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>, - EVEX_4V; + EVEX_4V, Sched<[sched]>; defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.MemOp:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", (_.VT (OpNode _.RC:$src1, _.RC:$src2, (src_v.VT (bitconvert - (src_v.LdFrag addr:$src3)))))>, EVEX_4V; + (src_v.LdFrag addr:$src3)))))>, EVEX_4V, + Sched<[sched.Folded, sched.ReadAfterFold]>; defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst), (ins _.RC:$src2, _.ScalarMemOp:$src3), @@ -12379,26 +12282,26 @@ multiclass avx512_dpbf16ps_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, !strconcat("$src2, ${src3}", _.BroadcastStr), (_.VT (OpNode _.RC:$src1, _.RC:$src2, (src_v.VT (src_v.BroadcastLdFrag addr:$src3))))>, - EVEX_B, EVEX_4V; + EVEX_B, EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; } } // Constraints = "$src1 = $dst" multiclass avx512_dpbf16ps_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode, - AVX512VLVectorVTInfo _, + X86SchedWriteWidths sched, AVX512VLVectorVTInfo _, AVX512VLVectorVTInfo src_v, Predicate prd> { let Predicates = [prd] in { - defm Z : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, _.info512, + defm Z : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, sched.ZMM, _.info512, src_v.info512>, EVEX_V512; } let Predicates = [HasVLX, prd] in { - defm Z256 : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, _.info256, + defm Z256 : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, sched.YMM, _.info256, src_v.info256>, EVEX_V256; - defm Z128 : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, _.info128, + defm Z128 : avx512_dpbf16ps_rm<opc, OpcodeStr, OpNode, sched.XMM, _.info128, src_v.info128>, EVEX_V128; } } -defm VDPBF16PS : avx512_dpbf16ps_sizes<0x52, 
"vdpbf16ps", X86dpbf16ps, +defm VDPBF16PS : avx512_dpbf16ps_sizes<0x52, "vdpbf16ps", X86dpbf16ps, SchedWriteFMA, avx512vl_f32_info, avx512vl_i32_info, HasBF16>, T8XS, EVEX_CD8<32, CD8VF>; |