author		Mateusz Guzik <mjg@FreeBSD.org>	2018-05-18 22:57:52 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2018-05-18 22:57:52 +0000
commit		10391db5300b72ff2918fbeecb0b6ab20f4dc373 (patch)
tree		256639fac2b83cf940692c911db0c99c8c5adef4 /sys/kern/kern_lock.c
parent		6b7e508c4a956603239068e537227d907d72cfb3 (diff)
lockmgr: avoid atomic on unlock in the slow path
The code is pretty much guaranteed not to be able to unlock here: by the time the slow path runs, the lock word it was handed normally carries waiter or recursion state, so the bare-tid compare-and-set would fail anyway.

This is a minor nit. The code still performs way too many reads. The altered exclusive-locked condition is supposed to always be true as well, and is to be cleaned up at a later date.
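In concrete terms, lockmgr_xunlock_hard() is only entered after the inline fast path has failed or been skipped, so the lock-word snapshot x it receives almost always carries waiter or recursion bits and cannot equal the bare thread id. Below is a minimal standalone sketch of this test-before-CAS pattern, using C11 atomics rather than the kernel's atomic(9) API; the helper name try_release() and the UNLOCKED constant are illustrative, not kernel identifiers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	UNLOCKED	((uintptr_t)1)		/* stand-in for LK_UNLOCKED */

/*
 * Hypothetical helper: 'x' is a snapshot of *lockp taken earlier by the
 * caller.  When the snapshot already differs from the bare thread id,
 * the compare-and-set below is guaranteed to fail, so a plain comparison
 * lets us skip the atomic entirely and fall through to the slow path.
 */
static bool
try_release(_Atomic(uintptr_t) *lockp, uintptr_t x, uintptr_t tid)
{
	if (x != tid)
		return (false);		/* waiter/recursion bits are set */

	/* Release semantics on success, mirroring atomic_cmpset_rel_ptr(). */
	return (atomic_compare_exchange_strong_explicit(lockp, &tid,
	    UNLOCKED, memory_order_release, memory_order_relaxed));
}

The point of the guard is that even a failing compare-and-set executes a locked read-modify-write and dirties the cache line, while comparing against a value already in a register is essentially free.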
Notes:
	svn path=/head/; revision=333816
Diffstat (limited to 'sys/kern/kern_lock.c')
 sys/kern/kern_lock.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 9fbee4dcdc30..023d730889cd 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -132,8 +132,10 @@ CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
 #define	lockmgr_disowned(lk)						\
 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
 
-#define	lockmgr_xlocked(lk)						\
-	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
+#define	lockmgr_xlocked_v(v)						\
+	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
+
+#define	lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)
 
 static void	assert_lockmgr(const struct lock_object *lock, int how);
 #ifdef DDB
@@ -1021,7 +1023,7 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 	 * The lock is held in exclusive mode.
 	 * If the lock is recursed also, then unrecurse it.
 	 */
-	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
+	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
 		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
 		lk->lk_recurse--;
 		goto out;
@@ -1029,7 +1031,7 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 	if (tid != LK_KERNPROC)
 		lock_profile_release_lock(&lk->lock_object);
 
-	if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
+	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
 		goto out;
 
 	sleepq_lock(&lk->lock_object);
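The macro split in the first hunk serves the same end: lk_lock is a volatile field, so every expansion of lockmgr_xlocked(lk) performs a fresh read, while lockmgr_xlocked_v(x) tests the snapshot the function already holds, keeping all of the unlock checks consistent against one observed value. A hedged standalone sketch of the difference follows, with a simplified flag mask; FLAG_BITS, lock_word, and unlock_checks() are illustrative names, not kernel code.

#include <stdint.h>

#define	FLAG_BITS	0xffUL	/* simplified stand-in for LK_FLAGMASK & ~LK_SHARE */

/* Value-based check: operates on a snapshot already in a register. */
#define	xlocked_v(v, tid)	(((v) & ~FLAG_BITS) == (tid))

volatile uintptr_t lock_word;	/* stand-in for lk->lk_lock */

static int
unlock_checks(uintptr_t tid)
{
	uintptr_t x;

	x = lock_word;		/* single read of the volatile word */
	/*
	 * Both tests see the same snapshot; a macro that re-reads
	 * lock_word could observe two different values if the word
	 * changed between the reads, besides costing extra loads.
	 */
	return (xlocked_v(x, tid) && (x & FLAG_BITS) == 0);
}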