Diffstat (limited to 'runtime/src/z_Linux_util.cpp')
-rw-r--r--  runtime/src/z_Linux_util.cpp  113
1 file changed, 59 insertions(+), 54 deletions(-)
diff --git a/runtime/src/z_Linux_util.cpp b/runtime/src/z_Linux_util.cpp
index aa0302c0fc37..1983fc2b9834 100644
--- a/runtime/src/z_Linux_util.cpp
+++ b/runtime/src/z_Linux_util.cpp
@@ -4,10 +4,9 @@
//===----------------------------------------------------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -438,7 +437,7 @@ void __kmp_terminate_thread(int gtid) {
__kmp_msg_null);
}
#endif
- __kmp_yield(TRUE);
+ KMP_YIELD(TRUE);
} //
/* Set thread stack info according to values returned by pthread_getattr_np().
@@ -581,8 +580,6 @@ static void *__kmp_launch_monitor(void *thr) {
sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
struct timespec interval;
- int yield_count;
- int yield_cycles = 0;
KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -666,13 +663,6 @@ static void *__kmp_launch_monitor(void *thr) {
KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));
- if (__kmp_yield_cycle) {
- __kmp_yielding_on = 0; /* Start out with yielding shut off */
- yield_count = __kmp_yield_off_count;
- } else {
- __kmp_yielding_on = 1; /* Yielding is on permanently */
- }
-
while (!TCR_4(__kmp_global.g.g_done)) {
struct timespec now;
struct timeval tval;
@@ -708,22 +698,6 @@ static void *__kmp_launch_monitor(void *thr) {
status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
- if (__kmp_yield_cycle) {
- yield_cycles++;
- if ((yield_cycles % yield_count) == 0) {
- if (__kmp_yielding_on) {
- __kmp_yielding_on = 0; /* Turn it off now */
- yield_count = __kmp_yield_off_count;
- } else {
- __kmp_yielding_on = 1; /* Turn it on now */
- yield_count = __kmp_yield_on_count;
- }
- yield_cycles = 0;
- }
- } else {
- __kmp_yielding_on = 1;
- }
-
TCW_4(__kmp_global.g.g_time.dt.t_value,
TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
@@ -1012,8 +986,8 @@ retry:
// Wait for the monitor thread is really started and set its *priority*.
KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
sizeof(__kmp_global.g.g_time.dt.t_value));
- __kmp_wait_yield_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value,
- -1, &__kmp_neq_4, NULL);
+ __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
+ &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX
#ifdef KMP_THREAD_ATTR
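
For context, the wait above blocks until the monitor thread begins advancing the global time counter; as the call shows, __kmp_wait_4 takes a spin location, a checker value, a predicate, and an object pointer. A minimal sketch of that kind of predicate-based spin wait (the names below are illustrative, not the runtime's exact implementation) could look like:

#include <sched.h>

// Illustrative sketch of a predicate-based spin wait: poll *spin until
// pred(*spin, checker) holds, yielding the CPU between polls. __kmp_wait_4
// plays a similar role for the monitor's time counter above.
typedef int (*wait_pred_t)(unsigned int value, unsigned int checker);

static void spin_wait_until(volatile unsigned int *spin, unsigned int checker,
                            wait_pred_t pred) {
  while (!pred(*spin, checker))
    sched_yield();
}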
@@ -1290,11 +1264,9 @@ static void __kmp_atfork_child(void) {
// over-subscription after the fork and this can improve things for
// scripting languages that use OpenMP inside process-parallel code).
__kmp_affinity_type = affinity_none;
-#if OMP_40_ENABLED
if (__kmp_nested_proc_bind.bind_types != NULL) {
__kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
}
-#endif // OMP_40_ENABLED
#endif // KMP_AFFINITY_SUPPORTED
__kmp_init_runtime = FALSE;
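
The hunk above also drops the OMP_40_ENABLED guard, so resetting the nested proc-bind types in the fork-child handler is now unconditional. As a hedged illustration of how a child-side fork handler like __kmp_atfork_child gets installed (the runtime wires this up in its own registration path; the names and handler body below are placeholders):

#include <pthread.h>

// Placeholder child handler: in the runtime, __kmp_atfork_child resets
// affinity/proc-bind state so forked children are not pinned to the parent's
// mask and do not over-subscribe the machine.
static void example_atfork_child(void) { /* reset affinity state here */ }

static void example_register_fork_handlers(void) {
  // prepare and parent handlers omitted; the child handler runs in the
  // newly forked child process.
  pthread_atfork(NULL, NULL, example_atfork_child);
}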
@@ -1378,11 +1350,22 @@ void __kmp_suspend_initialize(void) {
KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
-static void __kmp_suspend_initialize_thread(kmp_info_t *th) {
+void __kmp_suspend_initialize_thread(kmp_info_t *th) {
ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
- if (th->th.th_suspend_init_count <= __kmp_fork_count) {
- /* this means we haven't initialized the suspension pthread objects for this
- thread in this instance of the process */
+ int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
+ int new_value = __kmp_fork_count + 1;
+ // Return if already initialized
+ if (old_value == new_value)
+ return;
+ // Wait, then return if being initialized
+ if (old_value == -1 ||
+ !__kmp_atomic_compare_store(&th->th.th_suspend_init_count, old_value,
+ -1)) {
+ while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
+ KMP_CPU_PAUSE();
+ }
+ } else {
+ // Claim to be the initializer and do initializations
int status;
status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
&__kmp_suspend_cond_attr);
@@ -1390,13 +1373,13 @@ static void __kmp_suspend_initialize_thread(kmp_info_t *th) {
status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
&__kmp_suspend_mutex_attr);
KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
- *(volatile int *)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
+ KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
}
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
- if (th->th.th_suspend_init_count > __kmp_fork_count) {
+ if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
/* this means we have initialize the suspension pthread objects for this
thread in this instance of the process */
int status;
@@ -1410,10 +1393,26 @@ void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
KMP_SYSFAIL("pthread_mutex_destroy", status);
}
--th->th.th_suspend_init_count;
- KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
+ KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
+ __kmp_fork_count);
}
}
+// return true if lock obtained, false otherwise
+int __kmp_try_suspend_mx(kmp_info_t *th) {
+ return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
+}
+
+void __kmp_lock_suspend_mx(kmp_info_t *th) {
+ int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
+ KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
+}
+
+void __kmp_unlock_suspend_mx(kmp_info_t *th) {
+ int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
+ KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
+}
+
/* This routine puts the calling thread to sleep after setting the
sleep bit for the indicated flag variable to true. */
template <class C>
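
The rewritten __kmp_suspend_initialize_thread above replaces the plain counter check with a claim-or-wait pattern: one caller claims the initializer role via an atomic compare-and-store (with -1 as the "in progress" marker), while any concurrent caller spins until the new count (__kmp_fork_count + 1) is published with release semantics. A self-contained sketch of the same idea in portable C++11 atomics (the struct and names below are illustrative stand-ins, not the runtime's types) might read:

#include <atomic>
#include <thread>

// Sketch of the claim-or-wait once-initialization used above. 'generation'
// plays the role of __kmp_fork_count + 1; -1 marks "initialization in
// progress". Names and types are illustrative.
struct suspend_state {
  std::atomic<int> init_count{0};
  // pthread_cond_t / pthread_mutex_t would live here in the runtime
};

void ensure_initialized(suspend_state &s, int generation) {
  int old_value = s.init_count.load(std::memory_order_relaxed);
  if (old_value == generation)
    return; // already initialized for this fork generation
  // Try to claim the initializer role by writing the in-progress marker.
  if (old_value == -1 ||
      !s.init_count.compare_exchange_strong(old_value, -1)) {
    // Someone else is initializing: wait until they publish the new value.
    while (s.init_count.load(std::memory_order_acquire) != generation)
      std::this_thread::yield();
  } else {
    // We won the race: do the real initialization, then publish.
    // ... pthread_cond_init / pthread_mutex_init in the runtime ...
    s.init_count.store(generation, std::memory_order_release);
  }
}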
@@ -1437,7 +1436,13 @@ static inline void __kmp_suspend_template(int th_gtid, C *flag) {
/* TODO: shouldn't this use release semantics to ensure that
__kmp_suspend_initialize_thread gets called first? */
old_spin = flag->set_sleeping();
-
+ if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
+ __kmp_pause_status != kmp_soft_paused) {
+ flag->unset_sleeping();
+ status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
+ KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
+ return;
+ }
KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
" was %x\n",
th_gtid, flag->get(), flag->load(), old_spin));
@@ -1666,18 +1671,7 @@ void __kmp_resume_monitor() {
}
#endif // KMP_USE_MONITOR
-void __kmp_yield(int cond) {
- if (!cond)
- return;
-#if KMP_USE_MONITOR
- if (!__kmp_yielding_on)
- return;
-#else
- if (__kmp_yield_cycle && !KMP_YIELD_NOW())
- return;
-#endif
- sched_yield();
-}
+void __kmp_yield() { sched_yield(); }
void __kmp_gtid_set_specific(int gtid) {
if (__kmp_init_gtid) {
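
With the yield-cycle machinery gone, __kmp_yield is now an unconditional sched_yield, and the decision of whether to yield moves to the KMP_YIELD macro at each call site (as in the earlier __kmp_terminate_thread hunk). A trivial stand-in for that call-site check, ignoring the runtime's real yield heuristics, would be:

#include <sched.h>

// Illustrative only: yield the processor when the caller asks for it.
// The actual KMP_YIELD macro also factors in the runtime's own yield
// heuristics, which are omitted here.
static inline void yield_if(int cond) {
  if (cond)
    sched_yield();
}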
@@ -1835,6 +1829,17 @@ void __kmp_runtime_initialize(void) {
__kmp_xproc = __kmp_get_xproc();
+#if ! KMP_32_BIT_ARCH
+ struct rlimit rlim;
+ // read stack size of calling thread, save it as default for worker threads;
+ // this should be done before reading environment variables
+ status = getrlimit(RLIMIT_STACK, &rlim);
+ if (status == 0) { // success?
+ __kmp_stksize = rlim.rlim_cur;
+ __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
+ }
+#endif /* KMP_32_BIT_ARCH */
+
if (sysconf(_SC_THREADS)) {
/* Query the maximum number of threads */
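
The new block above seeds the default worker stack size from the calling thread's RLIMIT_STACK on 64-bit targets before environment variables are read, then adjusts it via __kmp_check_stksize. A minimal standalone sketch of that query (the fallback and clamping policy here are assumptions, not the runtime's):

#include <stddef.h>
#include <sys/resource.h>

// Sketch: query the soft stack limit of the calling thread, to be used as a
// default worker stack size. RLIM_INFINITY and unreasonable values would
// still need clamping, as the runtime does in __kmp_check_stksize.
static size_t default_stack_size(size_t fallback) {
  struct rlimit rlim;
  if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY)
    return (size_t)rlim.rlim_cur;
  return fallback;
}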