author     Mateusz Guzik <mjg@FreeBSD.org>  2021-02-15 04:24:17 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2021-02-15 13:57:25 +0000
commit     eac22dd48079d5cccecb07f4e60b44bb692167f8 (patch)
tree       20a17b60a1464eebb283adf35405e2c433d93c11 /sys/kern/kern_lock.c
parent     cc743b050acf59862b268296d50923d6e5f0e86f (diff)
lockmgr: shrink struct lock by 8 bytes on LP64
Currently the struct has a 4 byte padding stemming from 3 ints.

1. prio comfortably fits in a short. Unfortunately there is no dedicated
   type for it and plumbing one through the codebase is not worth it
   right now; instead an assert is added, which for safety also covers
   the flags.
2. lk_exslpfail can in principle exceed u_short, but the count is
   already not considered reliable and it only ever gets reset straight
   to 0. In other words it can keep incrementing, with an upper bound of
   USHRT_MAX.

With these in place struct lock shrinks from 48 to 40 bytes.

Reviewed by:	kib (previous version)
Differential Revision:	https://reviews.freebsd.org/D28680
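For illustration only, a minimal LP64 sketch of where the padding goes and
why 16-bit fields recover it. The field names here are made up (the real
struct lock is defined elsewhere in the tree); the point is just the
alignment arithmetic: an 8-byte-aligned word followed by 3 ints forces the
compiler to round the struct up to the next multiple of 8.

/* sketch: an 8-byte-aligned word followed by 3 ints vs. 16-bit fields */
#include <stdint.h>
#include <stdio.h>

struct wide {			/* before: 3 ints */
	uintptr_t	word;	/* 8 bytes, forces 8-byte alignment */
	int		a, b, c;	/* 12 bytes; total 20, padded to 24 */
};

struct narrow {			/* after: 16-bit fields */
	uintptr_t	word;	/* 8 bytes */
	unsigned short	a, b;	/* 4 bytes */
	short		c;	/* 2 bytes; total 14, padded to 16 */
};

int
main(void)
{
	/* prints 24 and 16 on LP64: the same 8-byte saving as 48 -> 40 */
	printf("%zu %zu\n", sizeof(struct wide), sizeof(struct narrow));
	return (0);
}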
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--  sys/kern/kern_lock.c  18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 091abcda2a1e..5cefcf7a597b 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <sys/ktr.h>
+#include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/lockmgr.h>
@@ -61,6 +62,12 @@ __FBSDID("$FreeBSD$");
 PMC_SOFT_DECLARE( , , lock, failed);
 #endif
 
+/*
+ * Hack. There should be prio_t or similar so that this is not necessary.
+ */
+_Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
+    "prio flags wont fit in u_short pri in struct lock");
+
 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
 
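The assert pins the prio range at compile time. A hypothetical companion
check (not part of this commit) could pin the claimed size win the same
way, so a later field change cannot silently reintroduce the padding:

/*
 * Hypothetical addition next to the existing asserts; 40 is the LP64
 * size claimed in the commit message.
 */
#ifdef __LP64__
_Static_assert(sizeof(struct lock) == 40,
    "struct lock no longer fits in 40 bytes on LP64");
#endif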
@@ -280,8 +287,10 @@ sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
 
 	if (flags & LK_INTERLOCK)
 		class->lc_unlock(ilk);
-	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
-		lk->lk_exslpfail++;
+	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
+		if (lk->lk_exslpfail < USHRT_MAX)
+			lk->lk_exslpfail++;
+	}
 	GIANT_SAVE();
 	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
 	    SLEEPQ_INTERRUPTIBLE : 0), queue);
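The clamp turns lk_exslpfail into a saturating counter: it can never wrap,
and USHRT_MAX doubles as an "overflowed, value unreliable" marker that the
readers in the hunks below test with != USHRT_MAX. A standalone sketch of
the pattern, with hypothetical helper names:

#include <limits.h>

/* writer side: saturate instead of wrapping */
static void
slpfail_inc(unsigned short *cnt)
{
	if (*cnt < USHRT_MAX)
		(*cnt)++;
}

/*
 * Reader side: a saturated count has lost information, so it is no
 * longer comparable against the real sleeper count; treat it as "at
 * least as large as any real value".
 */
static int
slpfail_covers_all(unsigned short cnt, unsigned int realexslp)
{
	return (cnt == USHRT_MAX || cnt >= realexslp);
}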
@@ -345,7 +354,7 @@ retry_sleepq:
 		realexslp = sleepq_sleepcnt(&lk->lock_object,
 		    SQ_EXCLUSIVE_QUEUE);
 		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
-			if (lk->lk_exslpfail < realexslp) {
+			if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
 				lk->lk_exslpfail = 0;
 				queue = SQ_EXCLUSIVE_QUEUE;
 				v |= (x & LK_SHARED_WAITERS);
@@ -362,7 +371,6 @@ retry_sleepq:
 				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
 				queue = SQ_SHARED_QUEUE;
 			}
-
 		} else {
 			/*
 			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
@@ -1170,7 +1178,7 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
 	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
-		if (lk->lk_exslpfail < realexslp) {
+		if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
 			lk->lk_exslpfail = 0;
 			queue = SQ_EXCLUSIVE_QUEUE;
 			v |= (x & LK_SHARED_WAITERS);