author     Mateusz Guzik <mjg@FreeBSD.org>  2020-07-21 14:42:22 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2020-07-21 14:42:22 +0000
commit     4aff9f5d99e0519061217fe07dbd5a1771f8545d (patch)
tree       4a30577f7fb227b32a925d7d4040bc9fa498f25a
parent     f6b091fbbd77cbb09728a52f0880dd5632ed7c51 (diff)
lockmgr: denote recursion with a bit in lock value

This reduces excessive reads from the lock.

Tested by:      pho

Notes:
    svn path=/head/; revision=363394
-rw-r--r--  sys/kern/kern_lock.c  10
-rw-r--r--  sys/sys/lockmgr.h      9
2 files changed, 12 insertions(+), 7 deletions(-)
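The commit message is terse, so a minimal sketch of the technique may help before the diff. Folding the recursion state into the lock word means the exclusive-release fast path can decide everything from a single read: if the word equals the bare thread id, no waiter, spinner, or recursion bit can be set. The sketch below is illustrative only, assuming C11 atomics in place of the kernel's atomic(9) primitives; struct sketch_lock and sketch_unlock_fast are invented names, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Flag bits live in the low bits of the lock word; 0x10 matches the
 * LK_WRITER_RECURSED value added by this commit (see the header diff). */
#define LK_UNLOCKED             0x00
#define LK_WRITER_RECURSED      0x10

struct sketch_lock {
        _Atomic uintptr_t lk_lock;      /* owning thread id | flag bits */
        unsigned lk_recurse;            /* depth, slow path only */
};

/*
 * Exclusive-release fast path: with recursion folded into the lock
 * word, "x == tid" already implies no waiter, spinner, or recursion
 * bit is set, so one load plus one CAS settles the common case.
 */
static bool
sketch_unlock_fast(struct sketch_lock *lk, uintptr_t tid)
{
        uintptr_t x = atomic_load_explicit(&lk->lk_lock,
            memory_order_relaxed);

        if (x != tid)
                return (false); /* recursed or contested: slow path */
        return (atomic_compare_exchange_strong_explicit(&lk->lk_lock, &x,
            (uintptr_t)LK_UNLOCKED, memory_order_release,
            memory_order_relaxed));
}

Before this change, ruling out recursion meant reading the separate lk_recurse field as well, costing the fast path an extra load even on locks that were never recursed.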
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 28eeb42ad143..11396a6e259f 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -736,6 +736,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
                         panic("%s: recursing on non recursive lockmgr %p "
                             "@ %s:%d\n", __func__, lk, file, line);
                 }
+                atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                 lk->lk_recurse++;
                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
@@ -1039,9 +1040,11 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
          * The lock is held in exclusive mode.
          * If the lock is recursed also, then unrecurse it.
          */
-        if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
+        if (lockmgr_recursed_v(x)) {
                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
                 lk->lk_recurse--;
+                if (lk->lk_recurse == 0)
+                        atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                 goto out;
         }
         if (tid != LK_KERNPROC)
@@ -1187,9 +1190,8 @@ lockmgr_unlock(struct lock *lk)
         } else {
                 tid = (uintptr_t)curthread;
                 lockmgr_note_exclusive_release(lk, file, line);
-                if (!lockmgr_recursed(lk) &&
-                    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
-                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
+                if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
+                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
                 } else {
                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                 }
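The two kern_lock.c hunks above maintain one invariant: LK_WRITER_RECURSED is set on any recursive acquire and cleared only when lk_recurse drops back to zero, so the lock word alone answers "is this lock recursed?". A self-contained sketch under the same assumptions as before (C11 atomics, invented sketch_* names; the struct is repeated for self-containment):

#include <stdatomic.h>
#include <stdint.h>

#define LK_WRITER_RECURSED      0x10

struct sketch_lock {
        _Atomic uintptr_t lk_lock;
        unsigned lk_recurse;
};

static void
sketch_recurse(struct sketch_lock *lk)
{
        /* Set the bit on every recursive acquire; re-setting it on
         * deeper recursion is harmless. */
        atomic_fetch_or_explicit(&lk->lk_lock, LK_WRITER_RECURSED,
            memory_order_relaxed);
        lk->lk_recurse++;
}

static void
sketch_unrecurse(struct sketch_lock *lk)
{
        lk->lk_recurse--;
        /* Clear the bit only when the last recursive hold is gone,
         * mirroring lockmgr_xunlock_hard() above. */
        if (lk->lk_recurse == 0)
                atomic_fetch_and_explicit(&lk->lk_lock,
                    ~(uintptr_t)LK_WRITER_RECURSED, memory_order_relaxed);
}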
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 65a06d7f3ecc..336f57007013 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -42,13 +42,14 @@
 #define LK_SHARED_WAITERS       0x02
 #define LK_EXCLUSIVE_WAITERS    0x04
 #define LK_EXCLUSIVE_SPINNERS   0x08
+#define LK_WRITER_RECURSED      0x10
 #define LK_ALL_WAITERS                                                  \
         (LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS)
 #define LK_FLAGMASK                                                     \
-        (LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)
+        (LK_SHARE | LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS | LK_WRITER_RECURSED)
 #define LK_HOLDER(x)            ((x) & ~LK_FLAGMASK)
-#define LK_SHARERS_SHIFT        4
+#define LK_SHARERS_SHIFT        5
 #define LK_SHARERS(x)           (LK_HOLDER(x) >> LK_SHARERS_SHIFT)
 #define LK_SHARERS_LOCK(x)      ((x) << LK_SHARERS_SHIFT | LK_SHARE)
 #define LK_ONE_SHARER           (1 << LK_SHARERS_SHIFT)
@@ -131,8 +132,10 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
             LOCK_FILE, LOCK_LINE)
 #define lockmgr_disown(lk)                                              \
         _lockmgr_disown((lk), LOCK_FILE, LOCK_LINE)
+#define lockmgr_recursed_v(v)                                           \
+        (v & LK_WRITER_RECURSED)
 #define lockmgr_recursed(lk)                                            \
-        ((lk)->lk_recurse != 0)
+        lockmgr_recursed_v((lk)->lk_lock)
 #define lockmgr_rw(lk, flags, ilk)                                      \
         _lockmgr_args_rw((lk), (flags), (ilk), LK_WMESG_DEFAULT,        \
             LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
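The header hunk also shows why LK_SHARERS_SHIFT moves from 4 to 5: the new flag occupies bit 4, so the shared-holder count must start one bit higher, and LK_FLAGMASK grows to cover five flag bits. A standalone sketch with the values copied from the header (the main() harness is mine, for illustration only):

#include <assert.h>
#include <stdint.h>

/* Lock word layout after this commit:
 *   bit 0   LK_SHARE
 *   bit 1   LK_SHARED_WAITERS
 *   bit 2   LK_EXCLUSIVE_WAITERS
 *   bit 3   LK_EXCLUSIVE_SPINNERS
 *   bit 4   LK_WRITER_RECURSED   (new)
 *   bit 5+  shared-holder count, or the owner's thread pointer
 */
#define LK_SHARE                0x01
#define LK_FLAGMASK             0x1f    /* all five flag bits */
#define LK_SHARERS_SHIFT        5       /* was 4 before the new bit */
#define LK_HOLDER(x)            ((x) & ~(uintptr_t)LK_FLAGMASK)
#define LK_SHARERS(x)           (LK_HOLDER(x) >> LK_SHARERS_SHIFT)
#define LK_SHARERS_LOCK(n)      (((uintptr_t)(n) << LK_SHARERS_SHIFT) | LK_SHARE)

int
main(void)
{
        /* A shared-holder count round-trips through the word unchanged
         * because the count sits entirely above the flag bits. */
        uintptr_t x = LK_SHARERS_LOCK(3);

        assert(LK_SHARERS(x) == 3);
        return (0);
}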