Diffstat (limited to 'runtime/src/kmp_os.h')
-rw-r--r--  runtime/src/kmp_os.h | 111
1 file changed, 93 insertions(+), 18 deletions(-)
diff --git a/runtime/src/kmp_os.h b/runtime/src/kmp_os.h
index 3c2426bcae46..c4c7bcf6cc57 100644
--- a/runtime/src/kmp_os.h
+++ b/runtime/src/kmp_os.h
@@ -4,10 +4,9 @@
//===----------------------------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -40,6 +39,14 @@
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif
+#ifndef __has_cpp_attribute
+#define __has_cpp_attribute(x) 0
+#endif
+
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+
/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
@@ -297,6 +304,20 @@ extern "C" {
#define KMP_CACHE_PREFETCH(ADDR) /* nothing */
+// Define an attribute indicating that the fall-through from the previous
+// case label is intentional and should not be diagnosed by the compiler.
+// Code taken from libcxx/include/__config.
+// Use a function-like macro to imply that it must be followed by a semicolon.
+#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
+# define KMP_FALLTHROUGH() [[fallthrough]]
+#elif __has_cpp_attribute(clang::fallthrough)
+# define KMP_FALLTHROUGH() [[clang::fallthrough]]
+#elif __has_attribute(fallthrough) || __GNUC__ >= 7
+# define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
+#else
+# define KMP_FALLTHROUGH() ((void)0)
+#endif
+
// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
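
A minimal usage sketch of KMP_FALLTHROUGH() (the decode() helper below is
hypothetical and not part of this patch; it assumes kmp_os.h is on the
include path). Whichever branch of the #if chain above is selected, the
expansion is a valid statement placed immediately before the next case label:

#include "kmp_os.h"

// Hypothetical helper: accumulate decreasing amounts, falling through.
static int decode(int kind, int acc) {
  switch (kind) {
  case 2:
    acc += 2; // deliberately continues into case 1
    KMP_FALLTHROUGH(); // trailing semicolon required by the macro's design
  case 1:
    acc += 1;
    break;
  default:
    break;
  }
  return acc;
}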
@@ -512,32 +533,56 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
+#if KMP_ARCH_MIPS
+#define KMP_TEST_THEN_INC64(p) \
+ __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
+#define KMP_TEST_THEN_INC_ACQ64(p) \
+ __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
+#else
#define KMP_TEST_THEN_INC64(p) \
__sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) \
__sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
+#endif
#define KMP_TEST_THEN_ADD4_32(p) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
+#if KMP_ARCH_MIPS
+#define KMP_TEST_THEN_ADD4_64(p) \
+ __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
+#define KMP_TEST_THEN_ADD4_ACQ64(p) \
+ __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
+#define KMP_TEST_THEN_DEC64(p) \
+ __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
+#define KMP_TEST_THEN_DEC_ACQ64(p) \
+ __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
+#else
#define KMP_TEST_THEN_ADD4_64(p) \
__sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
__sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
-#define KMP_TEST_THEN_DEC32(p) \
- __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
-#define KMP_TEST_THEN_DEC_ACQ32(p) \
- __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC64(p) \
__sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
__sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
+#endif
+#define KMP_TEST_THEN_DEC32(p) \
+ __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
+#define KMP_TEST_THEN_DEC_ACQ32(p) \
+ __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v) \
__sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
+#if KMP_ARCH_MIPS
+#define KMP_TEST_THEN_ADD64(p, v) \
+ __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
+ __ATOMIC_SEQ_CST)
+#else
#define KMP_TEST_THEN_ADD64(p, v) \
__sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
+#endif
#define KMP_TEST_THEN_OR8(p, v) \
__sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
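
The KMP_ARCH_MIPS branches above trade the __sync builtins for their
__atomic equivalents; 32-bit MIPS has no inline 8-byte __sync support, and
the __atomic forms can fall back to libatomic. A self-contained sketch of
the equivalence (names are illustrative only; MIPS32 builds may also need
to link -latomic):

#include <cstdint>

static volatile int64_t counter = 0;

// Both calls return the value `counter` held *before* the addition.
int64_t bump(int64_t v) {
#if defined(__mips__) && !defined(__mips64)
  return __atomic_fetch_add(&counter, v, __ATOMIC_SEQ_CST); // may call libatomic
#else
  return __sync_fetch_and_add(&counter, v); // legacy full-barrier builtin
#endif
}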
@@ -547,10 +592,19 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
__sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
+#if KMP_ARCH_MIPS
+#define KMP_TEST_THEN_OR64(p, v) \
+ __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
+ __ATOMIC_SEQ_CST)
+#define KMP_TEST_THEN_AND64(p, v) \
+ __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
+ __ATOMIC_SEQ_CST)
+#else
#define KMP_TEST_THEN_OR64(p, v) \
__sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v) \
__sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
+#endif
#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
__sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
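
The fetch-or/fetch-and pairs are typically applied to 64-bit flag words. A
small sketch of that idiom using the __atomic forms chosen for MIPS (the
helper names are illustrative, not from the runtime):

#include <cstdint>

static volatile uint64_t flags = 0;

// Set bit `b` and report whether it was already set.
bool test_and_set_bit(unsigned b) {
  uint64_t old = __atomic_fetch_or(&flags, UINT64_C(1) << b, __ATOMIC_SEQ_CST);
  return (old >> b) & 1;
}

// Clear bit `b`.
void clear_bit(unsigned b) {
  __atomic_fetch_and(&flags, ~(UINT64_C(1) << b), __ATOMIC_SEQ_CST);
}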
@@ -570,12 +624,6 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
__sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
(kmp_uint32)(sv))
-#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
- __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
- (kmp_uint64)(sv))
-#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
- __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
- (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
__sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv), \
(void *)(sv))
@@ -589,9 +637,38 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
__sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
(kmp_uint32)(sv))
+#if KMP_ARCH_MIPS
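+// MIPS (32-bit) has no 8-byte __sync builtins, so emulate both 64-bit
+// compare-and-swap shapes with __atomic_compare_exchange, which may lower
+// to libatomic calls.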
+static inline bool mips_sync_bool_compare_and_swap(
+ volatile kmp_uint64 *p, kmp_uint64 cv, kmp_uint64 sv) {
+ return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+}
+static inline kmp_uint64 mips_sync_val_compare_and_swap(
+    volatile kmp_uint64 *p, kmp_uint64 cv, kmp_uint64 sv) {
+ __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return cv;
+}
+#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
+ mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),\
+ (kmp_uint64)(sv))
+#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
+ mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),\
+ (kmp_uint64)(sv))
+#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
+ mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
+#else
+#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
+#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
+ __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
+ (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
__sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
(kmp_uint64)(sv))
+#endif
#define KMP_XCHG_FIXED8(p, v) \
__sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
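
The value-returning helper above works because __atomic_compare_exchange
rewrites cv with the current contents of *p on failure, so returning cv
always yields the prior value, matching __sync_val_compare_and_swap. A
sketch of the classic retry loop the RET64 form enables (cas_add() is
illustrative, not part of the patch):

#include <cstdint>

// Atomically add `v` to *p with a value-returning CAS; returns the old value.
static uint64_t cas_add(volatile uint64_t *p, uint64_t v) {
  uint64_t old = *p;
  for (;;) {
    uint64_t seen = old;
    uint64_t desired = old + v;
    if (__atomic_compare_exchange(p, &seen, &desired, false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
      return old; // swap happened; `old` is the pre-add value
    old = seen;   // failure refreshed `seen` with the current contents
  }
}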
@@ -861,8 +938,8 @@ typedef void (*microtask_t)(int *gtid, int *npr, ...);
#define VOLATILE_CAST(x) (x)
#endif
-#define KMP_WAIT_YIELD __kmp_wait_yield_4
-#define KMP_WAIT_YIELD_PTR __kmp_wait_yield_4_ptr
+#define KMP_WAIT __kmp_wait_4
+#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
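
The renamed KMP_WAIT/KMP_EQ macros select a spin-until-predicate routine. A
self-contained sketch of that shape (this is not the real __kmp_wait_4
signature, only an illustration of the pattern the macros name):

#include <cstdint>

typedef bool (*pred4_t)(uint32_t, uint32_t);

static bool eq4(uint32_t a, uint32_t b) { return a == b; }

// Spin until pred(*spinner, checker) holds; return the observed value.
// Usage: wait_4(&go_flag, 1, eq4);
static uint32_t wait_4(volatile uint32_t *spinner, uint32_t checker,
                       pred4_t pred) {
  uint32_t v;
  while (!pred(v = *spinner, checker)) {
    // A real runtime would yield or back off here instead of busy-spinning.
  }
  return v;
}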
@@ -888,9 +965,7 @@ typedef void (*microtask_t)(int *gtid, int *npr, ...);
#endif
// Enable dynamic user lock
-#if OMP_45_ENABLED
#define KMP_USE_DYNAMIC_LOCK 1
-#endif
// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on