author    Mateusz Guzik <mjg@FreeBSD.org>    2017-12-31 00:47:04 +0000
committer Mateusz Guzik <mjg@FreeBSD.org>    2017-12-31 00:47:04 +0000
commit    28f1a9e3ffe3044d6678e822e39f986ef250d69a (patch)
tree      2b7283e28ca4849fd986fa3a6f5795450b4ff165 /sys/kern
parent    518e4554be9201f9dc426aeb198da38aa7ca2aa9 (diff)
locks: re-check the reason to go to sleep after locking sleepq/turnstile
In both rw and sx locks we always go to sleep if the lock owner is not running, whereas we spin for some time if the lock is read-locked. However, if we decide to go to sleep because the owner is off CPU, and by the time the sleepq/turnstile lock is acquired the lock has become read-locked, we should fall back to the aforementioned spinning.
Notes:
    svn path=/head/; revision=327399
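The pattern the change introduces is easier to see outside the diff. Below is a minimal C sketch of the reworked write-lock slow path, not the actual kernel code: the struct lock type and every helper in it (lock_state(), try_acquire(), read_locked(), owner_running(), readers(), spin_budget_left(), spin_on_readers(), spin_while_owner_runs(), queue_lock(), queue_unlock(), block_on_queue()) are hypothetical stand-ins for the rw/sx primitives and the sleepq/turnstile API.

/*
 * Hypothetical lock-state helpers, declared only so the sketch compiles.
 * They stand in for the rw/sx and sleepq/turnstile primitives.
 */
struct lock;
typedef unsigned long lockword_t;

extern lockword_t lock_state(struct lock *);
extern int  try_acquire(struct lock *, lockword_t);
extern int  read_locked(lockword_t);
extern int  owner_running(lockword_t);
extern unsigned readers(lockword_t);
extern int  spin_budget_left(struct lock *);
extern int  spin_on_readers(struct lock *);
extern void spin_while_owner_runs(struct lock *);
extern void queue_lock(struct lock *);
extern void queue_unlock(struct lock *);
extern void block_on_queue(struct lock *);

enum { SLEEP_OWNER_OFF_CPU = 1, SLEEP_SPUN_ON_READERS = 2 };

static void
wlock_slowpath(struct lock *lk)
{
	int sleep_reason = 0;
	lockword_t v;

	for (;;) {
		v = lock_state(lk);
		if (try_acquire(lk, v))
			return;

		/* Adaptive spinning, as in the rw/sx write-lock slow paths. */
		sleep_reason = SLEEP_OWNER_OFF_CPU;
		if (!read_locked(v) && owner_running(v)) {
			/* Write-locked with the owner on CPU: spin. */
			spin_while_owner_runs(lk);
			continue;
		}
		if (read_locked(v) && spin_budget_left(lk)) {
			/*
			 * Read-locked: spin for a bounded number of loops.
			 * If the state changes in time, retry; otherwise
			 * remember that we already spun on the readers.
			 */
			if (spin_on_readers(lk))
				continue;
			sleep_reason = SLEEP_SPUN_ON_READERS;
		}

		/* Decided to block: take the sleepq/turnstile lock first. */
		queue_lock(lk);
		v = lock_state(lk);

		/*
		 * Re-check the reason to sleep.  If the lock became
		 * read-locked while we only meant to block because the
		 * owner was off CPU, drop the queue lock and fall back to
		 * spinning on the readers (this is what the patch adds).
		 */
		if (read_locked(v) && readers(v) > 0 &&
		    sleep_reason == SLEEP_OWNER_OFF_CPU) {
			queue_unlock(lk);
			continue;
		}
		block_on_queue(lk);
	}
}

Recording why we stopped spinning costs a single int on the stack, and it lets the sleep path distinguish "owner off CPU" from "spun out on readers", so the fallback only triggers in the case the commit message describes.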
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_rwlock.c |  6
-rw-r--r--  sys/kern/kern_sx.c     | 14
2 files changed, 17 insertions, 3 deletions
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 9eac607ab317..ea701330e442 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -872,6 +872,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
#ifdef ADAPTIVE_RWLOCKS
int spintries = 0;
int i, n;
+ int sleep_reason = 0;
#endif
uintptr_t x;
#ifdef LOCK_PROFILING
@@ -952,6 +953,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
* running on another CPU, spin until the owner stops
* running or the state of the lock changes.
*/
+ sleep_reason = 1;
owner = lv_rw_wowner(v);
if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -995,6 +997,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
#endif
if (i != rowner_loops)
continue;
+ sleep_reason = 2;
}
#endif
ts = turnstile_trywait(&rw->lock_object);
@@ -1015,6 +1018,9 @@ retry_ts:
turnstile_cancel(ts);
continue;
}
+ } else if (RW_READERS(v) > 0 && sleep_reason == 1) {
+ turnstile_cancel(ts);
+ continue;
}
#endif
/*
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 65ab0e6c6d80..f22f7b33d493 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -534,6 +534,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
volatile struct thread *owner;
u_int i, n, spintries = 0;
bool adaptive;
+ int sleep_reason = 0;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@@ -647,6 +648,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
sched_tdname(curthread), "running");
continue;
}
+ sleep_reason = 1;
} else if (SX_SHARERS(x) && spintries < asx_retries) {
KTR_STATE1(KTR_SCHED, "thread",
sched_tdname(curthread), "spinning",
@@ -671,6 +673,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
sched_tdname(curthread), "running");
if (i != asx_loops)
continue;
+ sleep_reason = 2;
}
#endif
sleepq:
@@ -695,9 +698,14 @@ retry_sleepq:
* chain lock. If so, drop the sleep queue lock and try
* again.
*/
- if (!(x & SX_LOCK_SHARED) && adaptive) {
- owner = (struct thread *)SX_OWNER(x);
- if (TD_IS_RUNNING(owner)) {
+ if (adaptive) {
+ if (!(x & SX_LOCK_SHARED)) {
+ owner = (struct thread *)SX_OWNER(x);
+ if (TD_IS_RUNNING(owner)) {
+ sleepq_release(&sx->lock_object);
+ continue;
+ }
+ } else if (SX_SHARERS(x) > 0 && sleep_reason == 1) {
sleepq_release(&sx->lock_object);
continue;
}