aboutsummaryrefslogtreecommitdiff
path: root/llvm/include/llvm/IR/IntrinsicsARM.td
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/include/llvm/IR/IntrinsicsARM.td')
-rw-r--r--llvm/include/llvm/IR/IntrinsicsARM.td350
1 files changed, 342 insertions, 8 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index e13da6157e04..518ad7079225 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -426,8 +426,6 @@ let IntrProperties = [IntrNoMem, Commutative] in {
def int_arm_neon_vhaddu : Neon_2Arg_Intrinsic;
def int_arm_neon_vrhadds : Neon_2Arg_Intrinsic;
def int_arm_neon_vrhaddu : Neon_2Arg_Intrinsic;
- def int_arm_neon_vqadds : Neon_2Arg_Intrinsic;
- def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic;
def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;
// Vector Multiply.
@@ -459,8 +457,6 @@ let IntrProperties = [IntrNoMem, Commutative] in {
// Vector Subtract.
def int_arm_neon_vhsubs : Neon_2Arg_Intrinsic;
def int_arm_neon_vhsubu : Neon_2Arg_Intrinsic;
-def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic;
-def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
// Vector Absolute Compare.
@@ -777,14 +773,352 @@ class Neon_Dot_Intrinsic
def int_arm_neon_udot : Neon_Dot_Intrinsic;
def int_arm_neon_sdot : Neon_Dot_Intrinsic;
-def int_arm_vctp8 : Intrinsic<[llvm_v16i1_ty], [llvm_i32_ty], [IntrNoMem]>;
-def int_arm_vctp16 : Intrinsic<[llvm_v8i1_ty], [llvm_i32_ty], [IntrNoMem]>;
-def int_arm_vctp32 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
-def int_arm_vctp64 : Intrinsic<[llvm_v2i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_cls: Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_cls64: Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+def int_arm_mve_vctp8 : Intrinsic<[llvm_v16i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vctp16 : Intrinsic<[llvm_v8i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vctp32 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+// vctp64 takes v4i1, to work around v2i1 not being a legal MVE type
+def int_arm_mve_vctp64 : Intrinsic<[llvm_v4i1_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// v8.3-A Floating-point complex add
+def int_arm_neon_vcadd_rot90 : Neon_2Arg_Intrinsic;
+def int_arm_neon_vcadd_rot270 : Neon_2Arg_Intrinsic;
// GNU eabi mcount
def int_arm_gnu_eabi_mcount : Intrinsic<[],
[],
[IntrReadMem, IntrWriteMem]>;
+def int_arm_mve_pred_i2v : Intrinsic<
+ [llvm_anyvector_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_pred_v2i : Intrinsic<
+ [llvm_i32_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+multiclass IntrinsicSignSuffix<list<LLVMType> rets, list<LLVMType> params = [],
+ list<IntrinsicProperty> props = [],
+ string name = "",
+ list<SDNodeProperty> sdprops = []> {
+ def _s: Intrinsic<rets, params, props, name, sdprops>;
+ def _u: Intrinsic<rets, params, props, name, sdprops>;
+}
+
+def int_arm_mve_min_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_max_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_abd_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_add_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_and_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_bic_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_eor_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_orn_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_orr_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_sub_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_mul_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_mulh_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_qdmulh_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_rmulh_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_qrdmulh_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_mull_int_predicated: Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty /* unsigned */,
+ llvm_i32_ty /* top */, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_mull_poly_predicated: Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_qadd_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_hadd_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_rhadd_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_qsub_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_hsub_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
+ llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
+
+defm int_arm_mve_minv: IntrinsicSignSuffix<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+defm int_arm_mve_maxv: IntrinsicSignSuffix<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+
+multiclass MVEPredicated<list<LLVMType> rets, list<LLVMType> params,
+ LLVMType pred = llvm_anyvector_ty,
+ list<IntrinsicProperty> props = [IntrNoMem]> {
+ def "": Intrinsic<rets, params, props>;
+ def _predicated: Intrinsic<rets, params # [pred], props>;
+}
+multiclass MVEPredicatedM<list<LLVMType> rets, list<LLVMType> params,
+ LLVMType pred = llvm_anyvector_ty,
+ list<IntrinsicProperty> props = [IntrNoMem]> {
+ def "": Intrinsic<rets, params, props>;
+ def _predicated: Intrinsic<rets, params # [pred,
+ !if(!eq(!cast<string>(rets[0]), "llvm_anyvector_ty"),
+ LLVMMatchType<0>, rets[0])], props>;
+}
+
+defm int_arm_mve_vcvt_narrow: MVEPredicated<[llvm_v8f16_ty],
+ [llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty], llvm_v4i1_ty>;
+
+defm int_arm_mve_vldr_gather_base: MVEPredicated<
+ [llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
+ llvm_anyvector_ty, [IntrReadMem]>;
+defm int_arm_mve_vldr_gather_base_wb: MVEPredicated<
+ [llvm_anyvector_ty, llvm_anyvector_ty],
+ [LLVMMatchType<1>, llvm_i32_ty], llvm_anyvector_ty, [IntrReadMem]>;
+defm int_arm_mve_vstr_scatter_base: MVEPredicated<
+ [], [llvm_anyvector_ty, llvm_i32_ty, llvm_anyvector_ty],
+ llvm_anyvector_ty, [IntrWriteMem]>;
+defm int_arm_mve_vstr_scatter_base_wb: MVEPredicated<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty],
+ llvm_anyvector_ty, [IntrWriteMem]>;
+
+// gather_offset takes three i32 parameters. The first is the size of
+// memory element loaded, in bits. The second is a left bit shift to
+// apply to each offset in the vector parameter (must be either 0, or
+// correspond to the element size of the destination vector type). The
+// last is 1 to indicate zero extension (if the load is widening), or
+// 0 for sign extension.
+//
+// scatter_offset has the first two of those parameters, but since it
+// narrows rather than widens, it doesn't have the last one.
+defm int_arm_mve_vldr_gather_offset: MVEPredicated<
+ [llvm_anyvector_ty], [llvm_anyptr_ty, llvm_anyvector_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], llvm_anyvector_ty, [IntrReadMem]>;
+defm int_arm_mve_vstr_scatter_offset: MVEPredicated<
+ [], [llvm_anyptr_ty, llvm_anyvector_ty, llvm_anyvector_ty,
+ llvm_i32_ty, llvm_i32_ty], llvm_anyvector_ty, [IntrWriteMem]>;
+
+def int_arm_mve_shl_imm_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_arm_mve_shr_imm_predicated: Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, // extra i32 is unsigned flag
+ llvm_anyvector_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+defm int_arm_mve_vqshl_imm: MVEPredicatedM<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/]>;
+defm int_arm_mve_vrshr_imm: MVEPredicatedM<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/]>;
+defm int_arm_mve_vqshlu_imm: MVEPredicatedM<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/]>;
+defm int_arm_mve_vshll_imm: MVEPredicatedM<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/,
+ llvm_i32_ty /*top-half*/]>;
+
+defm int_arm_mve_vsli: MVEPredicated<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty]>;
+defm int_arm_mve_vsri: MVEPredicated<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty]>;
+
+defm int_arm_mve_vshrn: MVEPredicated<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty,
+ llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/,
+ llvm_i32_ty /*unsigned-out*/, llvm_i32_ty /*unsigned-in*/,
+ llvm_i32_ty /*top-half*/]>;
+
+defm int_arm_mve_vshl_scalar: MVEPredicated<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/,
+ llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/, llvm_i32_ty /*unsigned*/]>;
+defm int_arm_mve_vshl_vector: MVEPredicatedM<
+ [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty /*shiftcounts*/,
+ llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/, llvm_i32_ty /*unsigned*/]>;
+
+// MVE scalar shifts.
+class ARM_MVE_qrshift_single<list<LLVMType> value,
+ list<LLVMType> saturate = []> :
+ Intrinsic<value, value # [llvm_i32_ty] # saturate, [IntrNoMem]>;
+multiclass ARM_MVE_qrshift<list<LLVMType> saturate = []> {
+ // Most of these shifts come in 32- and 64-bit versions. But only
+ // the 64-bit ones have the extra saturation argument (if any).
+ def "": ARM_MVE_qrshift_single<[llvm_i32_ty]>;
+ def l: ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty], saturate>;
+}
+defm int_arm_mve_urshr: ARM_MVE_qrshift;
+defm int_arm_mve_uqshl: ARM_MVE_qrshift;
+defm int_arm_mve_srshr: ARM_MVE_qrshift;
+defm int_arm_mve_sqshl: ARM_MVE_qrshift;
+defm int_arm_mve_uqrshl: ARM_MVE_qrshift<[llvm_i32_ty]>;
+defm int_arm_mve_sqrshr: ARM_MVE_qrshift<[llvm_i32_ty]>;
+// LSLL and ASRL only have 64-bit versions, not 32.
+def int_arm_mve_lsll: ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty]>;
+def int_arm_mve_asrl: ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty]>;
+
+def int_arm_mve_vabd: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vadc: Intrinsic<
+ [llvm_anyvector_ty, llvm_i32_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vadc_predicated: Intrinsic<
+ [llvm_anyvector_ty, llvm_i32_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vmulh: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vqdmulh: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_vhadd: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vrhadd: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vhsub: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vrmulh: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+ [IntrNoMem]>;
+def int_arm_mve_vqrdmulh: Intrinsic<
+ [llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
+def int_arm_mve_vmull: Intrinsic<
+ [llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty /* unsigned */,
+ llvm_i32_ty /* top */], [IntrNoMem]>;
+def int_arm_mve_vmull_poly: Intrinsic<
+ [llvm_anyvector_ty],
+ [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem]>;
+
+// Intrinsic with a predicated and a non-predicated case. The predicated case
+// has two additional parameters: inactive (the value for inactive lanes, can
+// be undef) and predicate.
+multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
+ list<LLVMType> params, LLVMType inactive,
+ LLVMType predicate,
+ list<IntrinsicProperty> props = [IntrNoMem]> {
+ def "": Intrinsic<rets, flags # params, props>;
+ def _predicated: Intrinsic<rets, flags # [inactive] # params # [predicate],
+ props>;
+}
+
+// The first two parameters are compile-time constants:
+// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
+// instruction. Note: the flag is inverted to match the corresponding
+// bit in the instruction encoding
+// * Rotation angle: 0 means 90 deg, 1 means 180 deg
+defm int_arm_mve_vcaddq : MVEMXPredicated<
+ [llvm_anyvector_ty],
+ [llvm_i32_ty, llvm_i32_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+
+// The first operand of the following two intrinsics is the rotation angle
+// (must be a compile-time constant):
+// 0 - 0 deg
+// 1 - 90 deg
+// 2 - 180 deg
+// 3 - 270 deg
+defm int_arm_mve_vcmulq : MVEMXPredicated<
+ [llvm_anyvector_ty],
+ [llvm_i32_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ LLVMMatchType<0>, llvm_anyvector_ty>;
+
+defm int_arm_mve_vcmlaq : MVEPredicated<
+ [llvm_anyvector_ty],
+ [llvm_i32_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ llvm_anyvector_ty>;
+
+def int_arm_mve_vld2q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
+def int_arm_mve_vld4q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
+
+def int_arm_mve_vst2q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]>;
+def int_arm_mve_vst4q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]
+>;
+
+// MVE vector absolute difference and accumulate across vector
+// The first operand is an 'unsigned' flag. The remaining operands are:
+// * accumulator
+// * first vector operand
+// * second vector operand
+// * mask (only in predicated versions)
+defm int_arm_mve_vabav: MVEPredicated<
+ [llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>], llvm_anyvector_ty>;
+
+// The following 3 intrinsics are MVE vector reductions with two vector
+// operands.
+// The first 3 operands are boolean flags (must be compile-time constants):
+// * unsigned - the instruction operates on vectors of unsigned values and
+// unsigned scalars
+// * subtract - the instruction performs subtraction after multiplication of
+// lane pairs (e.g., vmlsdav vs vmladav)
+// * exchange - the instruction exchanges successive even and odd lanes of
+// the first operands before multiplication of lane pairs
+// (e.g., vmladavx vs vmladav)
+// The remaining operands are:
+// * accumulator
+// * first vector operand
+// * second vector operand
+// * mask (only in predicated versions)
+
+// Version with 32-bit result, vml{a,s}dav[a][x]
+defm int_arm_mve_vmldava: MVEPredicated<
+ [llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
+ llvm_anyvector_ty>;
+
+// Version with 64-bit result, vml{a,s}ldav[a][x]
+defm int_arm_mve_vmlldava: MVEPredicated<
+ [llvm_i32_ty, llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
+ llvm_anyvector_ty>;
+
+// Version with 72-bit rounded result, vrml{a,s}ldavh[a][x]
+defm int_arm_mve_vrmlldavha: MVEPredicated<
+ [llvm_i32_ty, llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
+ llvm_anyvector_ty>;
} // end TargetPrefix