path: root/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
Diffstat (limited to 'contrib/llvm-project/clang/lib/Headers/avx512fintrin.h')
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512fintrin.h | 3281
1 file changed, 1651 insertions(+), 1630 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index 010bcadab019..4f172c74b31c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -26,6 +26,10 @@ typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
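Review note: the new __v64qs typedef exists because plain 'char' has implementation-defined signedness, so a char-element vector type is not guaranteed to compare as signed. A minimal sketch of the distinction, not part of the patch (sketch_lane_negative is a hypothetical name):

    /* Illustrative only: with an explicitly signed element type, a
     * lane-wise comparison against 0 is a signed compare on every
     * target; with plain char it would depend on the ABI. */
    typedef signed char v64qs_sketch __attribute__((__vector_size__(64)));
    static int sketch_lane_negative(v64qs_sketch v) {
      return v[0] < 0;   /* always a signed comparison */
    }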
@@ -163,9 +167,13 @@ typedef enum
} _MM_MANTISSA_SIGN_ENUM;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,no-evex512"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,no-evex512")))
/* Create vectors with repeated elements */
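Review note: evex512 and no-evex512 are Clang target features introduced alongside AVX10, where an implementation may support the AVX-512 instruction forms only at 128/256-bit vector lengths. Tagging the 512-bit intrinsics with evex512 makes the 512-bit requirement explicit, while the 128-bit and scalar helpers carry no-evex512 so they stay usable in a 256-bit-max configuration. A hedged sketch of how the updated macro is consumed (sketch_zero512 is a hypothetical helper, not in the header):

    /* Sketch: any 512-bit helper defined with the macro now requires
       both the avx512f and evex512 target features. */
    static __inline __m512 __DEFAULT_FN_ATTRS512
    sketch_zero512(void) {
      return _mm512_setzero_ps();
    }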
@@ -252,8 +260,8 @@ _mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_setzero_ps(void)
{
- return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+ return __extension__ (__m512){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
}
#define _mm512_setzero _mm512_setzero_ps
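Review note: the initializer change from 0.0 to 0.0f replaces double literals in a float-vector initializer with float literals. The value and code generation are identical; the likely intent, hedged, is to avoid the implicit double-to-float conversions that narrowing-conversion diagnostics would flag. Illustrative:

    /* Sketch of the pattern the change removes vs. what it adopts. */
    static void sketch_literals(void) {
      float narrowed = 0.0;   /* double constant, implicitly converted */
      float exact    = 0.0f;  /* the form the patch uses */
      (void)narrowed; (void)exact;
    }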
@@ -393,14 +401,15 @@ _mm512_broadcastsd_pd(__m128d __A)
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline __m128d __DEFAULT_FN_ATTRS512
@@ -442,7 +451,10 @@ _mm512_castpd_si512 (__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256d __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512d __DEFAULT_FN_ATTRS512
@@ -460,19 +472,25 @@ _mm512_castps_si512 (__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256 __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256i __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
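Review note on the _mm512_cast* hunks above: the old -1 shuffle indices left the upper lanes as IR-level undef, and the optimizer may assume a different value at each use of an undef, which can surprise code that treats the widened upper bits as merely "unspecified but stable". __builtin_nondeterministic_value produces a frozen, unspecified value instead, matching the documented "upper bits are undefined" contract without undef propagation. The self-referential `__m256d __B = __builtin_nondeterministic_value(__B);` is deliberate, not a bug: __B is passed only so the builtin can deduce its type. Hedged usage sketch (sketch_widen128 is hypothetical):

    /* Sketch: only lanes 0..3 of the result carry data; the remaining
       lanes are unspecified but now consistent across uses. */
    static __inline__ __m512i sketch_widen128(__m128i lo) {
      return _mm512_castsi128_si512(lo);
    }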
@@ -937,18 +955,18 @@ _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
}
#define _mm512_max_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_max_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_max_round_pd((A), (B), (R)), \
- (__v8df)(W))
+ (__v8df)(W)))
#define _mm512_maskz_max_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_max_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_max_pd(__m512d __A, __m512d __B)
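Review note on the parenthesization changes, which recur through the rest of this diff: the macro bodies expand to a cast-expression, and postfix operators bind tighter than a cast, so a postfix operator appended to the old expansion attached to the raw builtin call instead of to the cast result. The added outer parentheses make each expansion a single primary expression. One concrete consequence, illustrative only (a and b are assumed __m512d variables):

    /* With the outer parens this expands to
         ((__m512d)__builtin_ia32_maxpd512(...))[0]
       and extracts lane 0. Without them it would parse as
         (__m512d)(__builtin_ia32_maxpd512(...)[0]),
       an invalid scalar-to-vector cast. */
    double lane0 = _mm512_max_round_pd(a, b, _MM_FROUND_CUR_DIRECTION)[0];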
@@ -974,18 +992,18 @@ _mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_max_round_ps(A, B, R) \
- (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_max_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_max_round_ps((A), (B), (R)), \
- (__v16sf)(W))
+ (__v16sf)(W)))
#define _mm512_maskz_max_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_max_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_max_ps(__m512 __A, __m512 __B)
@@ -1029,22 +1047,22 @@ _mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_max_round_ss(A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_max_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_max_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1065,28 +1083,28 @@ _mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_max_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_max_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_max_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_max_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
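Review note, which also applies to the min/abs hunks below: Clang's generic __builtin_elementwise_max / __builtin_elementwise_min / __builtin_elementwise_abs dispatch on the vector element type, so one portable builtin replaces the whole __builtin_ia32_p{max,min}{s,u}{d,q}512 family and gives the middle-end a target-independent operation it can fold. Signedness is selected by the operand cast, which is why the unsigned intrinsics convert to __v16su/__v8du first. Hedged sketch (sketch_maxes is hypothetical):

    /* Sketch: same builtin, different instruction, chosen by element
       type -- vpmaxsd for signed lanes, vpmaxud for unsigned. */
    static __inline__ __m512i sketch_maxes(__m512i a, __m512i b) {
      __m512i smax = (__m512i)__builtin_elementwise_max((__v16si)a, (__v16si)b);
      __m512i umax = (__m512i)__builtin_elementwise_max((__v16su)a, (__v16su)b);
      return smax ^ umax;  /* arbitrary combine, just to use both */
    }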
@@ -1108,7 +1126,7 @@ _mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1148,7 @@ _mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1170,7 @@ _mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1172,18 +1190,18 @@ _mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
}
#define _mm512_min_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_min_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_min_round_pd((A), (B), (R)), \
- (__v8df)(W))
+ (__v8df)(W)))
#define _mm512_maskz_min_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_min_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_min_pd(__m512d __A, __m512d __B)
@@ -1209,18 +1227,18 @@ _mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_min_round_ps(A, B, R) \
- (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_min_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_min_round_ps((A), (B), (R)), \
- (__v16sf)(W))
+ (__v16sf)(W)))
#define _mm512_maskz_min_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_min_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_min_ps(__m512 __A, __m512 __B)
@@ -1264,22 +1282,22 @@ _mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_min_round_ss(A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_min_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_min_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1300,28 +1318,28 @@ _mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_min_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_min_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_min_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_min_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1361,7 @@ _mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1383,7 @@ _mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1405,7 @@ _mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1485,17 +1503,17 @@ _mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
}
#define _mm512_sqrt_round_pd(A, R) \
- (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
+ ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))
#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sqrt_round_pd((A), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_sqrt_round_pd(U, A, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sqrt_round_pd((A), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_sqrt_pd(__m512d __A)
@@ -1521,17 +1539,17 @@ _mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_sqrt_round_ps(A, R) \
- (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
+ ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))
#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sqrt_round_ps((A), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_sqrt_round_ps(U, A, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sqrt_round_ps((A), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_sqrt_ps(__m512 __A)
@@ -1776,7 +1794,7 @@ _mm512_floor_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_FLOOR,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1794,7 +1812,7 @@ _mm512_floor_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_FLOOR,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
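Review note on the (unsigned short)-1 / (unsigned char)-1 casts here and in the ceil hunks below: the builtins' trailing mask argument is an 8- or 16-bit mask, and a bare -1 is an int that gets implicitly narrowed at the call. The explicit cast spells out the all-lanes-enabled constant without changing its value. Illustrative:

    /* Sketch: the all-ones masks, written without an implicit
       int-to-mask narrowing. */
    static const unsigned char  sketch_all8  = (unsigned char)-1;  /* 0xFF   */
    static const unsigned short sketch_all16 = (unsigned short)-1; /* 0xFFFF */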
@@ -1821,7 +1839,7 @@ _mm512_ceil_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_CEIL,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1830,7 +1848,7 @@ _mm512_ceil_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_CEIL,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1846,7 +1864,7 @@ _mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi64(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+ return (__m512i)__builtin_elementwise_abs((__v8di)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1886,7 @@ _mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi32(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+ return (__m512i)__builtin_elementwise_abs((__v16si) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1900,22 +1918,22 @@ _mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_add_round_ss(A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_add_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_add_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1929,22 +1947,22 @@ _mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
#define _mm_add_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_add_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_add_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1975,32 +1993,32 @@ _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_add_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_add_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_add_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_add_round_ps(A, B, R) \
- (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_add_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_add_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2014,22 +2032,22 @@ _mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
#define _mm_sub_round_ss(A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_sub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_sub_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2044,22 +2062,22 @@ _mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_sub_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_sub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_sub_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2090,32 +2108,32 @@ _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_sub_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_sub_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_sub_round_ps(A, B, R) \
- (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_sub_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2129,22 +2147,22 @@ _mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
#define _mm_mul_round_ss(A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_mul_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_mul_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2159,22 +2177,22 @@ _mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_mul_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_mul_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_mul_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2205,32 +2223,32 @@ _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_mul_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_mul_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_mul_round_ps(A, B, R) \
- (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_mul_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2245,22 +2263,22 @@ _mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_div_round_ss(A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_div_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_div_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2275,22 +2293,22 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_div_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_div_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_div_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_div_pd(__m512d __a, __m512d __b)
@@ -2333,179 +2351,179 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_div_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_div_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_div_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_div_round_ps(A, B, R) \
- (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_div_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_div_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_roundscale_ps(A, B) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_ps(A, B, C, imm) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_roundscale_ps(A, B, imm) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(A), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- (int)(R))
+ (int)(R)))
#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(A), (int)(R))
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), (int)(R)))
#define _mm512_roundscale_round_ps(A, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_roundscale_pd(A, B) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_pd(A, B, C, imm) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_roundscale_pd(A, B, imm) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(A), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- (int)(R))
+ (int)(R)))
#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(A), (int)(R))
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), (int)(R)))
#define _mm512_roundscale_round_pd(A, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_fmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fnmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fnmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2629,87 +2647,87 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
}
#define _mm512_fmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fnmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fnmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -2833,52 +2851,52 @@ _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
}
#define _mm512_fmaddsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fmsubadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2952,52 +2970,52 @@ _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
}
#define _mm512_fmaddsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fmsubadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3071,10 +3089,10 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
}
#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3088,10 +3106,10 @@ _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
@@ -3104,10 +3122,10 @@ _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
}
#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3121,10 +3139,10 @@ _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3138,10 +3156,10 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
}
#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3155,10 +3173,10 @@ _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
}
#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3172,17 +3190,17 @@ _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
}
#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3206,17 +3224,17 @@ _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3312,63 +3330,63 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
}
#define _mm512_alignr_epi64(A, B, I) \
- (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I)))
#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_alignr_epi32(A, B, I) \
- (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I)))
#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
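The alignr forms treat {A:B} as a single concatenated vector and shift it right by whole elements, keeping the low 512 bits; the mask/maskz variants then blend or zero per lane. A minimal sketch under that reading (the wrapper name is illustrative, not part of the header):

#include <immintrin.h>

/* result[i] = b[i+2] for i in 0..5, then a[0], a[1] in the top two lanes. */
static inline __m512i take_from_offset2(__m512i a, __m512i b)
{
  return _mm512_alignr_epi64(a, b, 2);
}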
/* Vector Extract */
#define _mm512_extractf64x4_pd(A, I) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_undefined_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_undefined_pd(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm512_extractf32x4_ps(A, I) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
/* Vector Blend */
@@ -3407,14 +3425,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Compare */
#define _mm512_cmp_round_ps_mask(A, B, P, R) \
- (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)-1, (int)(R))
+ ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
- (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)(U), (int)(R))
+ ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_cmp_ps_mask(A, B, P) \
_mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
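Here P is one of the _CMP_* predicates shared with AVX's vcmpps, and the result carries one mask bit per float lane. A small usage sketch, assuming the standard predicate constants from <immintrin.h>:

#include <immintrin.h>

/* Sets bit i of the result where a[i] < b[i] (ordered, signaling). */
static inline __mmask16 lanes_less(__m512 a, __m512 b)
{
  return _mm512_cmp_ps_mask(a, b, _CMP_LT_OS);
}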
@@ -3462,14 +3480,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
_mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
#define _mm512_cmp_round_pd_mask(A, B, P, R) \
- (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
- (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)(U), (int)(R))
+ ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_cmp_pd_mask(A, B, P) \
_mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3519,19 +3537,19 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Conversion */
#define _mm512_cvtt_roundps_epu32(A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_undefined_epi32(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512i __DEFAULT_FN_ATTRS512
@@ -3563,34 +3581,34 @@ _mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundepi32_ps(A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_cvt_roundepu32_ps(A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtepu32_ps (__m512i __A)
@@ -3705,19 +3723,19 @@ _mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
}
#define _mm512_cvt_roundpd_ps(A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtpd_ps (__m512d __A)
@@ -3765,38 +3783,38 @@ _mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
}
#define _mm512_cvt_roundps_ph(A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_undefined_si256(), \
- (__mmask16)-1)
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)_mm256_undefined_si256(), \
+ (__mmask16)-1))
#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)(__m256i)(U), \
- (__mmask16)(W))
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)(__m256i)(U), \
+ (__mmask16)(W)))
#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(W))
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(W)))
#define _mm512_cvtps_ph _mm512_cvt_roundps_ph
#define _mm512_mask_cvtps_ph _mm512_mask_cvt_roundps_ph
#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
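Because the non-round aliases forward to the rounding forms, the immediate doubles as the rounding control for the float-to-half narrowing. A sketch of a round trip through half precision, assuming the usual rounding constants:

#include <immintrin.h>

/* Narrow 16 floats to FP16 with round-to-nearest, then widen back. */
static inline __m512 through_fp16(__m512 v)
{
  __m256i h = _mm512_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT);
  return _mm512_cvtph_ps(h);
}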
#define _mm512_cvt_roundph_ps(A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512 __DEFAULT_FN_ATTRS512
@@ -3828,19 +3846,19 @@ _mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
}
#define _mm512_cvtt_roundpd_epi32(A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epi32(__m512d __a)
@@ -3870,19 +3888,19 @@ _mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
}
#define _mm512_cvtt_roundps_epi32(A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epi32(__m512 __a)
@@ -3912,19 +3930,19 @@ _mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundps_epi32(A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epi32 (__m512 __A)
@@ -3955,19 +3973,19 @@ _mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundpd_epi32(A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epi32 (__m512d __A)
@@ -3999,19 +4017,19 @@ _mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
}
#define _mm512_cvt_roundps_epu32(A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epu32 ( __m512 __A)
@@ -4043,19 +4061,19 @@ _mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundpd_epu32(A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epu32 (__m512d __A)
@@ -4975,70 +4993,70 @@ _mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
#define _mm512_cmp_epi32_mask(a, b, p) \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm512_cmp_epu32_mask(a, b, p) \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm512_cmp_epi64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm512_cmp_epu64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm512_rol_epi32(a, b) \
- (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
+ ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)))
#define _mm512_mask_rol_epi32(W, U, a, b) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_rol_epi32((a), (b)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_rol_epi32(U, a, b) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_rol_epi32((a), (b)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_rol_epi64(a, b) \
- (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
+ ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)))
#define _mm512_mask_rol_epi64(W, U, a, b) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_rol_epi64((a), (b)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_rol_epi64(U, a, b) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_rol_epi64((a), (b)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)_mm512_setzero_si512()))
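The rotate intrinsics pair an immediate vprold/vprolq with a select, so the masked forms keep unselected lanes from W (or zero them in the maskz forms). A brief sketch (wrapper name illustrative):

#include <immintrin.h>

/* Rotate each 32-bit lane of v left by 8; lanes with a clear mask bit keep w. */
static inline __m512i rol8_masked(__m512i w, __mmask16 m, __m512i v)
{
  return _mm512_mask_rol_epi32(w, m, v, 8);
}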
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi32 (__m512i __A, __m512i __B)
@@ -5085,35 +5103,35 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
}
#define _mm512_ror_epi32(A, B) \
- (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
+ ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)))
#define _mm512_mask_ror_epi32(W, U, A, B) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_ror_epi32((A), (B)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_ror_epi32(U, A, B) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_ror_epi32((A), (B)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_ror_epi64(A, B) \
- (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
+ ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)))
#define _mm512_mask_ror_epi64(W, U, A, B) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_ror_epi64((A), (B)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_ror_epi64(U, A, B) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_ror_epi64((A), (B)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5135,7 +5153,7 @@ _mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5157,7 +5175,7 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5179,7 +5197,7 @@ _mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5304,168 +5322,168 @@ _mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fixupimm_pd(A, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), \
- (int)(imm), (__mmask8)(U), \
- (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), \
- (int)(imm), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fixupimm_ps(A, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U), \
- (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ (int)(R)))
#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm_fixupimm_sd(A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm_fixupimm_ss(A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_getexp_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -5486,10 +5504,10 @@ _mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -5502,16 +5520,16 @@ _mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_getexp_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getexp_round_ss(A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_getexp_ss (__m128 __A, __m128 __B)
@@ -5531,10 +5549,10 @@ _mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -5547,100 +5565,100 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_getexp_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getmant_round_sd(A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_getmant_sd(A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_getmant_sd(U, A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getmant_round_ss(A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_getmant_ss(A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_getmant_ss(U, A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kmov (__mmask16 __A)
@@ -5649,16 +5667,16 @@ _mm512_kmov (__mmask16 __A)
}
#define _mm_comi_round_sd(A, B, P, R) \
- (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
- (int)(P), (int)(R))
+ ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
+ (int)(P), (int)(R)))
#define _mm_comi_round_ss(A, B, P, R) \
- (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
- (int)(P), (int)(R))
+ ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+ (int)(P), (int)(R)))
#ifdef __x86_64__
#define _mm_cvt_roundsd_si64(A, R) \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
#endif
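The cvt_round* scalar conversions take an explicit rounding control in R; or-ing a rounding mode with _MM_FROUND_NO_EXC selects embedded rounding with exceptions suppressed. A hedged usage sketch (x86-64 only, standard constants):

#include <immintrin.h>

/* Convert the low double to i64, round to nearest, suppress exceptions. */
static inline long long sd_to_i64_nearest(__m128d x)
{
  return _mm_cvt_roundsd_si64(x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}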
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5925,55 +5943,58 @@ _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1)
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U))
+/// \enum _MM_TERNLOG_ENUM
+/// A helper to represent the ternary logic operations among vectors \a A,
+/// \a B and \a C. The representation is passed as \a imm.
+typedef enum {
+ _MM_TERNLOG_A = 0xF0,
+ _MM_TERNLOG_B = 0xCC,
+ _MM_TERNLOG_C = 0xAA
+} _MM_TERNLOG_ENUM;
+
+#define _mm512_ternarylogic_epi32(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)-1))
+
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_maskz( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_ternarylogic_epi64(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_maskz( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
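Each enumerator is one operand's column of the 3-input truth table (A = 0xF0, B = 0xCC, C = 0xAA), so any ternary boolean function can be written as a bitwise expression over the three constants and passed as imm; for instance (_MM_TERNLOG_A & _MM_TERNLOG_B) | _MM_TERNLOG_C evaluates to 0xEA. A minimal sketch (wrapper name illustrative):

#include <immintrin.h>

/* Per 32-bit lane: (a & b) | c, via imm = (0xF0 & 0xCC) | 0xAA = 0xEA. */
static inline __m512i and_then_or(__m512i a, __m512i b, __m512i c)
{
  return _mm512_ternarylogic_epi32(a, b, c,
                                   (_MM_TERNLOG_A & _MM_TERNLOG_B) | _MM_TERNLOG_C);
}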
#ifdef __x86_64__
#define _mm_cvt_roundsd_i64(A, R) \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
#endif
#define _mm_cvt_roundsd_si32(A, R) \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvt_roundsd_i32(A, R) \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvt_roundsd_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtsd_u32 (__m128d __A)
@@ -5984,8 +6005,8 @@ _mm_cvtsd_u32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvt_roundsd_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtsd_u64 (__m128d __A)
@@ -5997,21 +6018,21 @@ _mm_cvtsd_u64 (__m128d __A)
#endif
#define _mm_cvt_roundss_si32(A, R) \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvt_roundss_i32(A, R) \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
#ifdef __x86_64__
#define _mm_cvt_roundss_si64(A, R) \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvt_roundss_i64(A, R) \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
#endif
#define _mm_cvt_roundss_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtss_u32 (__m128 __A)
@@ -6022,8 +6043,8 @@ _mm_cvtss_u32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvt_roundss_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtss_u64 (__m128 __A)
@@ -6035,10 +6056,10 @@ _mm_cvtss_u64 (__m128 __A)
#endif
#define _mm_cvtt_roundsd_i32(A, R) \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvtt_roundsd_si32(A, R) \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttsd_i32 (__m128d __A)
@@ -6049,10 +6070,10 @@ _mm_cvttsd_i32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvtt_roundsd_si64(A, R) \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvtt_roundsd_i64(A, R) \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_i64 (__m128d __A)
@@ -6063,7 +6084,7 @@ _mm_cvttsd_i64 (__m128d __A)
#endif
#define _mm_cvtt_roundsd_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttsd_u32 (__m128d __A)
@@ -6074,8 +6095,8 @@ _mm_cvttsd_u32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvtt_roundsd_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_u64 (__m128d __A)
@@ -6087,10 +6108,10 @@ _mm_cvttsd_u64 (__m128d __A)
#endif
#define _mm_cvtt_roundss_i32(A, R) \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvtt_roundss_si32(A, R) \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttss_i32 (__m128 __A)
@@ -6101,10 +6122,10 @@ _mm_cvttss_i32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvtt_roundss_i64(A, R) \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvtt_roundss_si64(A, R) \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttss_i64 (__m128 __A)
@@ -6115,7 +6136,7 @@ _mm_cvttss_i64 (__m128 __A)
#endif
#define _mm_cvtt_roundss_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttss_u32 (__m128 __A)
@@ -6126,8 +6147,8 @@ _mm_cvttss_u32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvtt_roundss_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttss_u64 (__m128 __A)
@@ -6139,30 +6160,30 @@ _mm_cvttss_u64 (__m128 __A)
#endif
#define _mm512_permute_pd(X, C) \
- (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
+ ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)))
#define _mm512_mask_permute_pd(W, U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_permute_pd(U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_permute_ps(X, C) \
- (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
+ ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)))
#define _mm512_mask_permute_ps(W, U, X, C) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_permute_ps(U, X, C) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutevar_pd(__m512d __A, __m512i __C)
@@ -6274,19 +6295,19 @@ _mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
#define _mm512_cvtt_roundpd_epu32(A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epu32 (__m512d __A)
@@ -6318,106 +6339,106 @@ _mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
}
#define _mm_roundscale_round_sd(A, B, imm, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(imm), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)))
#define _mm_roundscale_sd(A, B, imm) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_maskz_roundscale_sd(U, A, B, I) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_roundscale_round_ss(A, B, imm, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(imm), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)))
#define _mm_roundscale_ss(A, B, imm) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_ss(W, U, A, B, I) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_maskz_roundscale_ss(U, A, B, I) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
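
For the roundscale family the immediate packs two fields: bits [7:4] give M,
the number of binary fraction bits to keep, and the low bits select the
rounding, so each element becomes 2^-M * round(2^M * x). A hedged sketch of
that encoding (placeholder inputs):

    #include <immintrin.h>

    __m128d quarter_steps(__m128d a, __m128d b)
    {
      /* Round the low double of b to multiples of 0.25 (M = 2),
       * nearest-even; the upper element is taken from a. */
      return _mm_roundscale_sd(a, b, (2 << 4) | _MM_FROUND_TO_NEAREST_INT);
    }
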
#define _mm512_scalef_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_scalef_pd (__m512d __A, __m512d __B)
@@ -6452,22 +6473,22 @@ _mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_scalef_round_ps(A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
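
_mm512_scalef_ps computes a * 2^floor(b) lane-wise -- effectively a vector
ldexpf whose exponent argument is itself a float vector. Sketch:

    #include <immintrin.h>

    __m512 scale_demo(void)
    {
      __m512 x = _mm512_set1_ps(3.0f);
      __m512 e = _mm512_set1_ps(4.0f);
      return _mm512_scalef_ps(x, e);   /* every lane: 3.0f * 2^4 = 48.0f */
    }
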
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_scalef_ps (__m512 __A, __m512 __B)
@@ -6502,10 +6523,10 @@ _mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
}
#define _mm_scalef_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_scalef_sd (__m128d __A, __m128d __B)
@@ -6527,10 +6548,10 @@ _mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6543,16 +6564,16 @@ _mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_scalef_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_scalef_round_ss(A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_scalef_ss (__m128 __A, __m128 __B)
@@ -6574,10 +6595,10 @@ _mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6590,16 +6611,16 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_scalef_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
}
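
The builtin behind _mm512_srai_epi32 is prototyped with an int shift count
while the intrinsic takes unsigned int, so the new (int)__B cast presumably
just makes that narrowing explicit for warning-clean builds; behavior is
unchanged. Usage sketch:

    #include <immintrin.h>

    __m512i sra_demo(void)
    {
      __m512i v = _mm512_set1_epi32(-16);
      return _mm512_srai_epi32(v, 2);   /* arithmetic shift: every lane -4 */
    }
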
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6622,7 +6643,7 @@ _mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6642,94 +6663,94 @@ _mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
}
#define _mm512_shuffle_f32x4(A, B, imm) \
- (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(imm))
+ ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm)))
#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_shuffle_f64x2(A, B, imm) \
- (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(imm))
+ ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm)))
#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_shuffle_i32x4(A, B, imm) \
- (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm)))
#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_shuffle_i64x2(A, B, imm) \
- (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm)))
#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_shuffle_pd(A, B, M) \
- (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(M))
+ ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(M)))
#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_shuffle_pd(U, A, B, M) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_shuffle_ps(A, B, M) \
- (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(M))
+ ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(M)))
#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_shuffle_ps(U, A, B, M) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)_mm512_setzero_ps()))
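
The shuffle_f32x4/f64x2/i32x4/i64x2 immediates select whole 128-bit lanes:
two-bit fields pick result lanes 0-1 from A and lanes 2-3 from B, low bits
first. Sketch (placeholder inputs):

    #include <immintrin.h>

    __m512 low_halves(__m512 a, __m512 b)
    {
      /* 0x44 = 0b01000100: result lanes { a[0], a[1], b[0], b[1] },
       * i.e. the low 256 bits of a followed by the low 256 bits of b. */
      return _mm512_shuffle_f32x4(a, b, 0x44);
    }
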
#define _mm_sqrt_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -6742,10 +6763,10 @@ _mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6758,16 +6779,16 @@ _mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_sqrt_round_ss(A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -6780,10 +6801,10 @@ _mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6796,10 +6817,10 @@ _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x4(__m128 __A)
@@ -7366,183 +7387,183 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
}
#define _mm512_extracti32x4_epi32(A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1)
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1))
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm512_extracti64x4_epi64(A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1)
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_undefined_si256(), \
+ (__mmask8)-1))
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)))
#define _mm512_insertf64x4(A, B, imm) \
- (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
- (__v4df)(__m256d)(B), (int)(imm))
+ ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm)))
#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_insertf64x4(U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_inserti64x4(A, B, imm) \
- (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
- (__v4di)(__m256i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm)))
#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_inserti64x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_insertf32x4(A, B, imm) \
- (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
- (__v4sf)(__m128)(B), (int)(imm))
+ ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm)))
#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_insertf32x4(U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_inserti32x4(A, B, imm) \
- (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
- (__v4si)(__m128i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm)))
#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_inserti32x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
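
extracti32x4 and inserti32x4 are inverses at 128-bit granularity, and the
masked forms are assembled from the plain intrinsic plus a lane select,
exactly as the macro bodies above spell out. Round-trip sketch:

    #include <immintrin.h>

    __m512i roundtrip(__m512i v)
    {
      __m128i top = _mm512_extracti32x4_epi32(v, 3);  /* bits [511:384] */
      return _mm512_inserti32x4(v, top, 3);           /* == v */
    }
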
#define _mm512_getmant_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_getmant_pd(A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_getmant_pd(W, U, A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_getmant_pd(U, A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_getmant_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_getmant_ps(A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_getmant_ps(W, U, A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_getmant_ps(U, A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
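
The ((C)<<2 | (B)) term packs the two getmant enums into one immediate: B,
the _MM_MANTISSA_NORM_ENUM interval selector, occupies bits [1:0], and C,
the _MM_MANTISSA_SIGN_ENUM sign control, bits [3:2]. Sketch:

    #include <immintrin.h>

    __m512d mantissas(__m512d x)
    {
      /* Mantissas normalized into [1, 2), keeping the source sign. */
      return _mm512_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
    }
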
#define _mm512_getexp_round_pd(A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_getexp_round_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_getexp_round_pd(U, A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_getexp_pd (__m512d __A)
@@ -7572,19 +7593,19 @@ _mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_getexp_round_ps(A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_getexp_round_ps(W, U, A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_getexp_round_ps(U, A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
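
getexp returns each lane's unbiased exponent as a float, i.e.
floor(log2(|x|)); combined with getmant above it decomposes a value much
like frexp does. Sketch:

    #include <immintrin.h>

    __m512 exp_demo(void)
    {
      return _mm512_getexp_ps(_mm512_set1_ps(8.0f));  /* every lane 3.0f */
    }
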
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_getexp_ps (__m512 __A)
@@ -7614,100 +7635,100 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
}
#define _mm512_i64gather_ps(index, addr, scale) \
- (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
- (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
- (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+ ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
(void const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)-1, (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
+
+#define _mm512_i64gather_epi32(index, addr, scale) \
+ ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)-1, (int)(scale)))
#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
- (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i64gather_pd(index, addr, scale) \
- (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
- (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i64gather_epi64(index, addr, scale) \
- (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i32gather_ps(index, addr, scale) \
- (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
- (void const *)(addr), \
- (__v16si)(__m512)(index), \
- (__mmask16)-1, (int)(scale))
+ ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
+ (void const *)(addr), \
+ (__v16si)(__m512)(index), \
+ (__mmask16)-1, (int)(scale)))
#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
- (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
- (void const *)(addr), \
- (__v16si)(__m512)(index), \
- (__mmask16)(mask), (int)(scale))
+ ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
+ (void const *)(addr), \
+ (__v16si)(__m512)(index), \
+ (__mmask16)(mask), (int)(scale)))
#define _mm512_i32gather_epi32(index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v16si)(__m512i)(index), \
- (__mmask16)-1, (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)-1, (int)(scale)))
#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v16si)(__m512i)(index), \
- (__mmask16)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)(mask), (int)(scale)))
#define _mm512_i32gather_pd(index, addr, scale) \
- (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
- (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i32gather_epi64(index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
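
Each gather loads element i from (char const *)addr + index[i] * scale;
scale must be an immediate 1, 2, 4, or 8, and in the masked forms lanes
whose mask bit is clear keep v1_old. Sketch with a hypothetical table:

    #include <immintrin.h>

    __m512 gather_demo(const float table[1024])
    {
      __m512i idx = _mm512_set1_epi32(7);
      /* All 16 lanes load table[7]; 4 == sizeof(float). */
      return _mm512_i32gather_ps(idx, table, 4);
    }
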
#define _mm512_i64scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
@@ -7800,16 +7821,16 @@ _mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
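
The R argument of every *_round_* form must be a compile-time constant:
either _MM_FROUND_CUR_DIRECTION or one of the static rounding modes OR'ed
with _MM_FROUND_NO_EXC, since static rounding implies suppressing
exceptions. Sketch:

    #include <immintrin.h>

    __m128 fma_demo(__m128 a, __m128 b, __m128 c)
    {
      /* Low lane: a[0]*b[0] + c[0], round-to-nearest-even; upper lanes
       * are copied from a. */
      return _mm_fmadd_round_ss(a, b, c,
                                _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }
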
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7822,10 +7843,10 @@ _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7838,10 +7859,10 @@ _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7854,16 +7875,16 @@ _mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7876,10 +7897,10 @@ _mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7892,10 +7913,10 @@ _mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7908,16 +7929,16 @@ _mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fnmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7930,10 +7951,10 @@ _mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7946,10 +7967,10 @@ _mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7962,16 +7983,16 @@ _mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fnmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7984,10 +8005,10 @@ _mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -8000,10 +8021,10 @@ _mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8016,16 +8037,16 @@ _mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8038,10 +8059,10 @@ _mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8054,10 +8075,10 @@ _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8070,16 +8091,16 @@ _mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8092,10 +8113,10 @@ _mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8108,10 +8129,10 @@ _mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8124,16 +8145,16 @@ _mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fnmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8146,10 +8167,10 @@ _mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8162,10 +8183,10 @@ _mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8178,16 +8199,16 @@ _mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fnmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8200,11 +8221,11 @@ _mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), \
- (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8217,36 +8238,36 @@ _mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_permutex_pd(X, C) \
- (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
+ ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)))
#define _mm512_mask_permutex_pd(W, U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_permutex_pd(U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_permutex_epi64(X, C) \
- (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
+ ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)))
#define _mm512_mask_permutex_epi64(W, U, X, C) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_permutex_epi64(U, X, C) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)_mm512_setzero_si512()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
@@ -8416,10 +8437,10 @@ _mm512_kxor (__mmask16 __A, __mmask16 __B)
#define _kxor_mask16 _mm512_kxor
#define _kshiftli_mask16(A, I) \
- (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
+ ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)))
#define _kshiftri_mask16(A, I) \
- (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
+ ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)))
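
The mask-register helpers get the same parenthesization; semantically the
kshift macros are plain 16-bit shifts on a __mmask16. Sketch:

    #include <immintrin.h>

    __mmask16 top_byte(void)
    {
      __mmask16 m = 0x00FF;
      return _kshiftli_mask16(m, 8);   /* 0xFF00 */
    }
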
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_cvtmask16_u32(__mmask16 __A) {
@@ -8538,48 +8559,48 @@ _mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
}
#define _mm_cmp_round_ss_mask(X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)(M), (int)(R))
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)))
#define _mm_cmp_ss_mask(X, Y, P) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_cmp_round_sd_mask(X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)(M), (int)(R))
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)))
#define _mm_cmp_sd_mask(X, Y, P) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION))
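Usage sketch for the scalar compare-to-mask macros (editor's illustration; _CMP_LT_OS comes from the AVX headers, and the rounding argument of the _round_ forms only selects exception suppression here):

#include <immintrin.h>
int scalar_less(void) {
  __m128 x = _mm_set_ss(1.0f);
  __m128 y = _mm_set_ss(2.0f);
  /* Compare lane 0 of x and y; bit 0 of the mask holds the result. */
  __mmask8 m = _mm_cmp_round_ss_mask(x, y, _CMP_LT_OS, _MM_FROUND_NO_EXC);
  return m & 1;  /* 1, since 1.0f < 2.0f */
}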
/* Bit Test */
@@ -8760,17 +8781,17 @@ _mm_maskz_load_sd (__mmask8 __U, const double* __A)
}
#define _mm512_shuffle_epi32(A, I) \
- (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
+ ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)))
#define _mm512_mask_shuffle_epi32(W, U, A, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_shuffle_epi32(U, A, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)_mm512_setzero_si512()))
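Usage sketch for the shuffle macros (editor's illustration; the immediate is interpreted per 128-bit lane, exactly as for PSHUFD):

#include <immintrin.h>
__m512i reverse_dwords(__m512i a) {
  /* Reverse the four dwords inside each 128-bit lane, keeping only the
   * lanes selected by the mask and zeroing elements 4-7 and 12-15. */
  return _mm512_maskz_shuffle_epi32(0x0F0F, a, _MM_SHUFFLE(0, 1, 2, 3));
}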
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
@@ -8901,19 +8922,19 @@ _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
}
#define _mm512_cvt_roundps_pd(A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
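Usage sketch (editor's illustration): float-to-double widening is exact, so the rounding argument only chooses the status-flag behavior:

#include <immintrin.h>
__m512d widen(__m256 s) {
  return _mm512_cvt_roundps_pd(s, _MM_FROUND_CUR_DIRECTION);
}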
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtps_pd (__m256 __A)
@@ -9010,22 +9031,22 @@ _mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
}
#define _mm_cvt_roundsd_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
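Usage sketch (editor's illustration): double-to-float narrowing can be inexact, so an explicit rounding mode is meaningful; an embedded rounding mode must be combined with _MM_FROUND_NO_EXC:

#include <immintrin.h>
__m128 narrow(void) {
  __m128d b = _mm_set_sd(1.0 / 3.0);
  __m128 a = _mm_setzero_ps();
  /* Lane 0 becomes (float)(1.0/3.0) rounded to nearest;
   * lanes 1-3 are copied from a. */
  return _mm_cvt_roundsd_ss(a, b,
                            _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}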
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
@@ -9058,47 +9079,47 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#ifdef __x86_64__
#define _mm_cvt_roundi64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R))
+ ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)))
#define _mm_cvt_roundsi64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R))
+ ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)))
#endif
#define _mm_cvt_roundsi32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
#define _mm_cvt_roundi32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
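Usage sketch (editor's illustration): _mm_cvt_roundi32_ss and _mm_cvt_roundsi32_ss expand identically; the rounding mode becomes visible once the integer exceeds float's 24-bit significand:

#include <immintrin.h>
__m128 int_to_float(__m128 a) {
  /* 16777217 = 2^24 + 1 is not representable as float;
   * rounding toward negative infinity yields 16777216.0f. */
  return _mm_cvt_roundsi32_ss(a, (1 << 24) + 1,
                              _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
}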
#ifdef __x86_64__
#define _mm_cvt_roundsi64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)))
#define _mm_cvt_roundi64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)))
#endif
#define _mm_cvt_roundss_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
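Usage sketch (editor's illustration): float-to-double widening is exact, so only _MM_FROUND_CUR_DIRECTION or _MM_FROUND_NO_EXC is meaningful here:

#include <immintrin.h>
__m128d widen_scalar(void) {
  __m128 b = _mm_set_ss(0.5f);
  __m128d a = _mm_setzero_pd();
  /* Lane 0 becomes (double)0.5f; lane 1 is copied from a. */
  return _mm_cvt_roundss_sd(a, b, _MM_FROUND_CUR_DIRECTION);
}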
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
@@ -9127,8 +9148,8 @@ _mm_cvtu32_sd (__m128d __A, unsigned __B)
#ifdef __x86_64__
#define _mm_cvt_roundu64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
- (unsigned long long)(B), (int)(R))
+ ((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
+ (unsigned long long)(B), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
@@ -9139,8 +9160,8 @@ _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
#endif
#define _mm_cvt_roundu32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu32_ss (__m128 __A, unsigned __B)
@@ -9151,8 +9172,8 @@ _mm_cvtu32_ss (__m128 __A, unsigned __B)
#ifdef __x86_64__
#define _mm_cvt_roundu64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
- (unsigned long long)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
+ (unsigned long long)(B), (int)(R)))
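Usage sketch (editor's illustration, x86-64 only): converting 2^64 - 1 to float is inexact, so the embedded rounding mode matters:

#include <immintrin.h>
__m128 u64_to_float(__m128 a) {
  /* Round toward zero: lane 0 becomes the largest float below 2^64
   * (0x1.fffffep63f) rather than rounding up to 2^64. */
  return _mm_cvt_roundu64_ss(a, ~0ULL,
                             _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}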
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
@@ -9312,43 +9333,43 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
*/
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
- return __builtin_ia32_reduce_and_q512(__W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
- return __builtin_ia32_reduce_and_q512(__W);
+ __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
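Behavior sketch for the reductions (editor's illustration): the switch to the generic __builtin_reduce_* forms is semantics-preserving, and the masked variants first substitute each operation's identity (0 for add, 1 for mul, all-ones for and) into unselected lanes:

#include <immintrin.h>
long long sums(void) {
  __m512i v = _mm512_set_epi64(8, 7, 6, 5, 4, 3, 2, 1);
  long long all = _mm512_reduce_add_epi64(v);            /* 36 */
  long long low = _mm512_mask_reduce_add_epi64(0x0F, v); /* 1+2+3+4 = 10 */
  return all + low;
}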
// -0.0 is used to ignore the start value since it is the neutral value of
@@ -9376,46 +9397,46 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_epi32(__m512i __W) {
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_and_epi32(__m512i __W) {
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_or_epi32(__m512i __W) {
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
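Note on the identity change (editor's observation): _mm512_set1_epi32 takes an int, so the all-ones identity is now spelled -1 instead of ~0U; both produce the same 0xFFFFFFFF pattern, but -1 avoids an implicit unsigned-to-signed conversion. Sketch:

#include <immintrin.h>
int and_low_two(__m512i v) {
  /* Unselected lanes become all-ones, the identity for AND, so only
   * elements 0 and 1 contribute to the result. */
  return _mm512_mask_reduce_and_epi32(0x0003, v);
}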
static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9463,89 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umin_q512(__V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi64(__M, __V);
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
- return __builtin_ia32_reduce_umin_q512(__V);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi32(__M, __V);
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V);
+ return __builtin_reduce_min((__v16su)__V);
}
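Behavior sketch (editor's illustration): the masked min/max reductions seed unselected lanes with the operation's identity (INT_MAX for signed min, all-ones for unsigned min, and so on):

#include <immintrin.h>
unsigned min_high_half(void) {
  __m512i v = _mm512_set_epi32(16, 15, 14, 13, 12, 11, 10, 9,
                               8, 7, 6, 5, 4, 3, 2, 1);
  /* Elements 8..15 hold 9..16; unselected lanes become 0xFFFFFFFF. */
  return _mm512_mask_reduce_min_epu32(0xFF00, v);  /* 9 */
}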
static __inline__ double __DEFAULT_FN_ATTRS512
@@ -9594,7 +9615,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9602,7 +9623,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_pd(vindex, base_addr, scale) \
_mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
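Usage sketch for the "lo" gather (editor's illustration): only the low eight dword indices of the 512-bit index vector are consumed, one per double loaded; scale must be a compile-time constant of 1, 2, 4, or 8:

#include <immintrin.h>
__m512d gather_first8(const double *table /* at least 8 elements */) {
  /* The low 8 dword lanes hold indices 0..7 (_mm512_set_epi32 lists
   * arguments from element 15 down to element 0). */
  __m512i idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
                                 7, 6, 5, 4, 3, 2, 1, 0);
  return _mm512_i32logather_pd(idx, table, 8);  /* 8 == sizeof(double) */
}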
@@ -9614,7 +9635,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9626,7 +9647,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9637,7 +9658,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9645,7 +9666,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_epi64(vindex, base_addr, scale) \
_mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
@@ -9656,7 +9677,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9668,7 +9689,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9679,14 +9700,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := v1[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \
_mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
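Matching scatter sketch (editor's illustration; the indices here are distinct, which sidesteps any question of overlapping-store ordering):

#include <immintrin.h>
void scatter_ones(double *out /* at least 8 elements */) {
  __m512i idx = _mm512_set_epi32(0, 0, 0, 0, 0, 0, 0, 0,
                                 7, 6, 5, 4, 3, 2, 1, 0);
  _mm512_i32loscatter_pd(out, idx, _mm512_set1_pd(1.0), 8);
  /* out[0..7] are all 1.0 afterwards */
}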
@@ -9698,7 +9719,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9707,7 +9728,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_pd((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9718,14 +9739,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := a[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \
_mm512_i32scatter_epi64((base_addr), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9737,7 +9758,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9746,7 +9767,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_epi64((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))