Diffstat (limited to 'contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h')
 -rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h  137
 1 file changed, 90 insertions(+), 47 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
index 1b1a744bcdbf..89c9f49c7aed 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
@@ -10,17 +10,19 @@
#error "Never use <avx512vlbf16intrin.h> directly; include <immintrin.h> instead."
#endif
+#ifdef __SSE2__
+
#ifndef __AVX512VLBF16INTRIN_H
#define __AVX512VLBF16INTRIN_H
-typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
-
-#define __DEFAULT_FN_ATTRS128 \
- __attribute__((__always_inline__, __nodebug__, \
- __target__("avx512vl, avx512bf16"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 \
- __attribute__((__always_inline__, __nodebug__, \
- __target__("avx512vl, avx512bf16"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bf16,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bf16,no-evex512"), \
+ __min_vector_width__(256)))
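
The attribute changes above only affect how these intrinsics are gated; callers still include <immintrin.h> and enable the features, either for the whole translation unit or per function. A minimal sketch, assuming a build such as clang -O2 -mavx512vl -mavx512bf16 (the function name below is illustrative, not part of the header):

    /* Assumed build: clang -O2 -mavx512vl -mavx512bf16 demo.c */
    #include <immintrin.h>

    /* Pack eight floats into one vector of eight bfloat16 values: the low
     * 64 bits hold bf16(lo), the high 64 bits hold bf16(hi). */
    __m128bh pack_bf16(__m128 lo, __m128 hi) {
      return _mm_cvtne2ps_pbh(hi, lo);
    }
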
/// Convert Two Packed Single Data to One Packed BF16 Data.
///
@@ -59,9 +61,9 @@ _mm_cvtne2ps_pbh(__m128 __A, __m128 __B) {
/// conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
- (__v8hi)__W);
+ return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
+ (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8bf)__W);
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -81,9 +83,9 @@ _mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
/// conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
- (__v8hi)_mm_setzero_si128());
+ return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
+ (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8bf)_mm_setzero_si128());
}
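
A hedged usage sketch of the merging and zeroing forms above; the function name and mask value are illustrative and assume avx512vl and avx512bf16 are enabled as shown earlier:

    #include <immintrin.h>

    /* Keep even bf16 lanes from the fresh conversion (mask 0x55) and take
     * the odd lanes from the pass-through operand; _mm_maskz_cvtne2ps_pbh
     * would zero those lanes instead. */
    __m128bh blend_convert(__m128bh passthru, __m128 a, __m128 b) {
      return _mm_mask_cvtne2ps_pbh(passthru, (__mmask8)0x55, a, b);
    }
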
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -123,9 +125,9 @@ _mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) {
/// conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
- (__v16hi)__W);
+ return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
+ (__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16bf)__W);
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -145,9 +147,9 @@ _mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
/// conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
+ (__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16bf)_mm256_setzero_si256());
}
/// Convert Packed Single Data to Packed BF16 Data.
@@ -160,12 +162,8 @@ _mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
/// conversion of __A, and higher 64 bits are 0.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_cvtneps_pbh(__m128 __A) {
- return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)_mm_undefined_si128(),
- (__mmask8)-1);
-}
+#define _mm_cvtneps_pbh(A) \
+ ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A)))
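
Although _mm_cvtneps_pbh becomes a macro over __builtin_ia32_vcvtneps2bf16128 here, call sites look exactly as before; a small sketch (illustrative name, same assumed build flags):

    #include <immintrin.h>

    /* Narrow four floats to bfloat16; only the low 64 bits of the result
     * carry data, the upper 64 bits are zero. */
    __m128bh narrow4(__m128 v) {
      return _mm_cvtneps_pbh(v);
    }
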
/// Convert Packed Single Data to Packed BF16 Data.
///
@@ -185,7 +183,7 @@ _mm_cvtneps_pbh(__m128 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)__W,
+ (__v8bf)__W,
(__mmask8)__U);
}
@@ -205,7 +203,7 @@ _mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)_mm_setzero_si128(),
+ (__v8bf)_mm_setzero_si128(),
(__mmask8)__U);
}
@@ -218,12 +216,8 @@ _mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
/// \param __A
/// A 256-bit vector of [8 x float].
/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS256
-_mm256_cvtneps_pbh(__m256 __A) {
- return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)_mm_undefined_si128(),
- (__mmask8)-1);
-}
+#define _mm256_cvtneps_pbh(A) \
+ ((__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)(A)))
/// Convert Packed Single Data to Packed BF16 Data.
///
@@ -242,7 +236,7 @@ _mm256_cvtneps_pbh(__m256 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)__W,
+ (__v8bf)__W,
(__mmask8)__U);
}
@@ -261,7 +255,7 @@ _mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)_mm_setzero_si128(),
+ (__v8bf)_mm_setzero_si128(),
(__mmask8)__U);
}
@@ -282,8 +276,8 @@ _mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) {
return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D,
- (__v4si)__A,
- (__v4si)__B);
+ (__v8bf)__A,
+ (__v8bf)__B);
}
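
A sketch of how the dot-product form above is typically fed, following the pair-wise semantics described in the surrounding documentation (each float lane accumulates the products of two adjacent bf16 lanes); names are illustrative:

    #include <immintrin.h>

    /* One accumulation step: narrow eight floats per operand to bf16 pairs,
     * then add the pair-wise products into the four float lanes of acc. */
    __m128 dot_step(__m128 acc, __m128 a_lo, __m128 a_hi,
                    __m128 b_lo, __m128 b_hi) {
      __m128bh a = _mm_cvtne2ps_pbh(a_hi, a_lo);
      __m128bh b = _mm_cvtne2ps_pbh(b_hi, b_lo);
      return _mm_dpbf16_ps(acc, a, b);
    }
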
/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
@@ -351,8 +345,8 @@ _mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
- (__v8si)__A,
- (__v8si)__B);
+ (__v16bf)__A,
+ (__v16bf)__B);
}
/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
@@ -413,11 +407,23 @@ _mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
/// A float data.
/// \returns A bf16 data whose sign field and exponent field keep unchanged,
/// and fraction field is truncated to 7 bits.
-static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
+static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
__v4sf __V = {__A, 0, 0, 0};
- __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
- (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
- return __R[0];
+ __v8bf __R = __builtin_ia32_cvtneps2bf16_128_mask(
+ (__v4sf)__V, (__v8bf)_mm_undefined_si128(), (__mmask8)-1);
+ return (__bf16)__R[0];
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) {
+ return _mm_castsi128_ps(
+ (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16));
}
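
The newly added _mm_cvtpbh_ps widens the four bf16 values in the low 64 bits of its argument, which pairs naturally with _mm_cvtneps_pbh; a minimal round-trip sketch (illustrative name, same assumed build flags):

    #include <immintrin.h>

    /* Truncate four floats to bf16 and widen them back; only the top seven
     * explicit mantissa bits of each value survive the round trip. */
    __m128 bf16_round_trip(__m128 v) {
      return _mm_cvtpbh_ps(_mm_cvtneps_pbh(v));
    }
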
/// Convert Packed BF16 Data to Packed float Data.
@@ -426,7 +432,7 @@ static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
///
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
(__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
@@ -437,11 +443,27 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
/// \headerfile <x86intrin.h>
///
/// \param __U
+/// A 4-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
+ return _mm_castsi128_ps((__m128i)_mm_slli_epi32(
+ (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
/// A 8-bit mask. Elements are zeroed out when the corresponding mask
/// bit is not set.
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
@@ -453,6 +475,26 @@ _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
/// \headerfile <x86intrin.h>
///
/// \param __S
+/// A 128-bit vector of [4 x float]. Elements are copied from __S when
+/// the corresponding mask bit is not set.
+/// \param __U
+/// A 4-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) {
+ return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32(
+ (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A),
+ 16));
+}
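+
+// A sketch of the merging form added above (mask value and names are
+// illustrative, not part of the patch):
+//
+//     #include <immintrin.h>
+//
+//     /* Widen only the two lowest bf16 lanes; float lanes 2 and 3 keep
+//      * the values from passthru. */
+//     __m128 widen_low_two(__m128 passthru, __m128bh packed) {
+//       return _mm_mask_cvtpbh_ps(passthru, (__mmask8)0x3, packed);
+//     }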
+
+/// Convert Packed BF16 Data to Packed float Data using merging mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __S
/// A 256-bit vector of [8 x float]. Elements are copied from __S when
/// the corresponding mask bit is not set.
/// \param __U
@@ -460,7 +502,7 @@ _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
/// bit is not set.
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
@@ -472,3 +514,4 @@ _mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
#undef __DEFAULT_FN_ATTRS256
#endif
+#endif