author	Mateusz Guzik <mjg@FreeBSD.org>	2017-02-06 09:40:14 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2017-02-06 09:40:14 +0000
commit	c1aaf63cb52133580093e5efe9af6dd7081f32be (patch)
tree	450e675c7635c450e7da28b551fefa58c13e3cdb /sys/kern
parent	467c82cb848afc8fc3e1540553900c9ed2c97082 (diff)
locks: fix recursion support after recent changes
When a relevant lockstat probe is enabled, the fallback primitive is called with a constant signifying a free lock. This works fine for typical cases but breaks with recursion, since the fallback checks whether the passed value matches that of the executing thread. Read the actual lock value if necessary.
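As a rough illustration of the pattern being fixed (not the actual FreeBSD code), consider a simplified lock whose word holds the owner's thread id; the names struct lock, slow_path_lock and LOCK_UNOWNED here are hypothetical stand-ins:

#include <stdint.h>

#define	LOCK_UNOWNED	((uintptr_t)0)

struct lock {
	volatile uintptr_t word;	/* owner tid, or LOCK_UNOWNED if free */
	int recursed;			/* recursion depth */
};

static void
slow_path_lock(struct lock *lk, uintptr_t v, uintptr_t tid)
{
	uintptr_t expected;

	/*
	 * The fix: with a lockstat probe enabled, the fast path calls
	 * in with the free-lock constant rather than the observed lock
	 * word, so re-read the real value before the recursion check.
	 */
	if (v == LOCK_UNOWNED)
		v = lk->word;

	/* Already owned by this thread: recurse instead of spinning. */
	if (v == tid) {
		lk->recursed++;
		return;
	}

	/* A bare CAS spin stands in for the real contested-acquire loop. */
	do {
		expected = LOCK_UNOWNED;
	} while (!__atomic_compare_exchange_n(&lk->word, &expected, tid,
	    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

Each hunk below applies the same re-read, guarded by __predict_false since the constant only shows up while a lockstat probe is active.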
Notes:
	svn path=/head/; revision=313335
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/kern_mutex.c	2
-rw-r--r--	sys/kern/kern_rwlock.c	2
-rw-r--r--	sys/kern/kern_sx.c	3
3 files changed, 7 insertions, 0 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 83977bb6425e..e37fb34cd521 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -495,6 +495,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	m = mtxlock2mtx(c);
+	if (__predict_false(v == MTX_UNOWNED))
+		v = MTX_READ_VALUE(m);
 	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index c024de5cc8cb..0a59bf94f3f8 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -812,6 +812,8 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
 	lock_delay_arg_init(&lda, NULL);
 #endif
 	rw = rwlock2rw(c);
+	if (__predict_false(v == RW_UNLOCKED))
+		v = RW_READ_VALUE(rw);
 	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 4cce16d301b8..c0fc26aabd32 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -531,6 +531,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
 	lock_delay_arg_init(&lda, NULL);
 #endif
+	if (__predict_false(x == SX_LOCK_UNLOCKED))
+		x = SX_READ_VALUE(sx);
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
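To make the failure mode concrete, here is a hedged driver for the illustrative sketch above (again, not FreeBSD code): without the re-read, the second call would miss the recursion branch and spin forever on a lock the thread already holds.

#include <assert.h>

int
main(void)
{
	struct lock lk = { .word = LOCK_UNOWNED, .recursed = 0 };
	uintptr_t tid = 1;	/* stand-in for curthread */

	slow_path_lock(&lk, LOCK_UNOWNED, tid);	/* first acquire */
	/*
	 * Second acquire, modelling the probe-enabled fast path: it
	 * hands over the free-lock constant even though this thread
	 * already owns lk.  The re-read restores the recursion check.
	 */
	slow_path_lock(&lk, LOCK_UNOWNED, tid);
	assert(lk.recursed == 1);
	return (0);
}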