author	Konstantin Belousov <kib@FreeBSD.org>	2024-08-29 20:45:21 +0000
committer	Konstantin Belousov <kib@FreeBSD.org>	2024-08-29 21:32:48 +0000
commit	7e49f04c88390c4c07f90c733c0d35ad6ff00f1c (patch)
tree	98ee5891677de011e40477063ce02a66e9610b2d
parent	41e016289f77deb88b0ef1ec3f7b2ab3515ac7c8 (diff)
rangelocks: stop caching per-thread rl_q_entry
This should reduce the frequency of smr_synchronize() calls, which
otherwise occur on almost every rangelock unlock.

Reviewed by:	markj
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D46482
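Background on the removed scheme, as reflected in the diff below: each thread
cached at most one rl_q_entry in td->td_rlqe. The lock path consumed the cached
entry, so the cache was empty again by the next unlock; the unlock path, finding
it empty, had to call smr_synchronize() (a full SMR grace-period wait) before
stashing the just-freed entry for immediate reuse. Handing the entry back to the
SMR-aware UMA zone instead lets the zone defer reclamation in batches, with no
synchronous wait. What follows is a minimal userspace sketch of that trade-off,
not kernel code: entry_zone_alloc(), entry_zone_free_deferred(), and
smr_wait_for_readers() are hypothetical stand-ins for uma_zalloc_smr(),
uma_zfree_smr(), and smr_synchronize().

#include <stdlib.h>

struct rl_q_entry {
	struct rl_q_entry *rl_q_free;
};

/* One-entry per-thread cache, modeling td->td_rlqe. */
static _Thread_local struct rl_q_entry *td_rlqe;

static void
smr_wait_for_readers(void)
{
	/* Stand-in for smr_synchronize(): block until all SMR readers drain. */
}

static struct rl_q_entry *
entry_zone_alloc(void)
{
	/* Stand-in for uma_zalloc_smr(rl_entry_zone, M_WAITOK). */
	return (calloc(1, sizeof(struct rl_q_entry)));
}

static void
entry_zone_free_deferred(struct rl_q_entry *e)
{
	/* Stand-in for uma_zfree_smr(): the zone defers reuse past readers. */
	free(e);
}

/*
 * Old lock path: prefer the cached entry and drain the cache, so the
 * cache is empty again by the time of the next unlock.
 */
static struct rl_q_entry *
alloc_entry_old(void)
{
	struct rl_q_entry *e;

	if (td_rlqe != NULL) {
		e = td_rlqe;
		td_rlqe = NULL;
	} else {
		e = entry_zone_alloc();
	}
	return (e);
}

/*
 * Old unlock path: refilling the empty cache requires waiting out a full
 * grace period, because the cached entry may be handed out immediately.
 * Since the cache is empty on almost every unlock, the wait ran almost
 * every time.
 */
static void
free_entry_old(struct rl_q_entry *e)
{
	if (td_rlqe == NULL) {
		smr_wait_for_readers();
		td_rlqe = e;
	} else {
		entry_zone_free_deferred(e);
	}
}

/* New unlock path: always defer to the SMR-aware zone; no synchronous wait. */
static void
free_entry_new(struct rl_q_entry *e)
{
	entry_zone_free_deferred(e);
}

int
main(void)
{
	/* Old scheme: the unlock path pays the grace-period wait. */
	free_entry_old(alloc_entry_old());

	/* New scheme: the free is deferred by the zone; no wait. */
	free_entry_new(alloc_entry_old());

	free(td_rlqe);	/* release anything left in the cache (NULL-safe) */
	return (0);
}

The cost of the change is at most one extra zone allocation per lock
acquisition; the benefit, per the commit message, is removing a synchronous
grace-period wait from almost every unlock.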
-rw-r--r--	sys/kern/kern_rangelock.c	26
-rw-r--r--	sys/kern/kern_thread.c	2
-rw-r--r--	sys/sys/proc.h	2
-rw-r--r--	sys/sys/rangelock.h	1
4 files changed, 4 insertions, 27 deletions
diff --git a/sys/kern/kern_rangelock.c b/sys/kern/kern_rangelock.c
index 3a11059a7f64..3437e3da77ba 100644
--- a/sys/kern/kern_rangelock.c
+++ b/sys/kern/kern_rangelock.c
@@ -313,15 +313,8 @@ static struct rl_q_entry *
 rlqentry_alloc(vm_ooffset_t start, vm_ooffset_t end, int flags)
 {
 	struct rl_q_entry *e;
-	struct thread *td;
-
-	td = curthread;
-	if (td->td_rlqe != NULL) {
-		e = td->td_rlqe;
-		td->td_rlqe = NULL;
-	} else {
-		e = uma_zalloc_smr(rl_entry_zone, M_WAITOK);
-	}
+
+	e = uma_zalloc_smr(rl_entry_zone, M_WAITOK);
 	e->rl_q_next = NULL;
 	e->rl_q_free = NULL;
 	e->rl_q_start = start;
@@ -334,12 +327,6 @@ rlqentry_alloc(vm_ooffset_t start, vm_ooffset_t end, int flags)
 }
 
 void
-rangelock_entry_free(struct rl_q_entry *e)
-{
-	uma_zfree_smr(rl_entry_zone, e);
-}
-
-void
 rangelock_init(struct rangelock *lock)
 {
 	lock->sleepers = false;
@@ -401,19 +388,12 @@ static void
 rangelock_free_free(struct rl_q_entry *free)
 {
 	struct rl_q_entry *x, *xp;
-	struct thread *td;
 
-	td = curthread;
 	for (x = free; x != NULL; x = xp) {
 		MPASS(!rl_e_is_marked(x));
 		xp = x->rl_q_free;
 		MPASS(!rl_e_is_marked(xp));
-		if (td->td_rlqe == NULL) {
-			smr_synchronize(rl_smr);
-			td->td_rlqe = x;
-		} else {
-			uma_zfree_smr(rl_entry_zone, x);
-		}
+		uma_zfree_smr(rl_entry_zone, x);
 	}
 }
 
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 9c3694feb945..4ccd6b26528e 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -460,7 +460,6 @@ thread_init(void *mem, int size, int flags)
 	td->td_allocdomain = vm_phys_domain(vtophys(td));
 	td->td_sleepqueue = sleepq_alloc();
 	td->td_turnstile = turnstile_alloc();
-	td->td_rlqe = NULL;
 	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
 	umtx_thread_init(td);
 	td->td_kstack = 0;
@@ -480,7 +479,6 @@ thread_fini(void *mem, int size)
 	EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
-	rangelock_entry_free(td->td_rlqe);
 	umtx_thread_fini(td);
 	MPASS(td->td_sel == NULL);
 }
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 57c910d8fce0..1e98cc84a60a 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -247,7 +247,7 @@ struct thread {
 	struct seltd	*td_sel;	/* Select queue/channel. */
 	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
 	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
-	struct rl_q_entry *td_rlqe;	/* (k) Associated range lock entry. */
+	void		*td_pad1;	/* Available */
 	struct umtx_q	*td_umtxq;	/* (c?) Link for when we're blocked. */
 	lwpid_t		td_tid;		/* (b) Thread ID. */
 	sigqueue_t	td_sigqueue;	/* (c) Sigs arrived, not delivered. */
diff --git a/sys/sys/rangelock.h b/sys/sys/rangelock.h
index accf33d7296b..32ccf3427b49 100644
--- a/sys/sys/rangelock.h
+++ b/sys/sys/rangelock.h
@@ -65,7 +65,6 @@ void	*rangelock_wlock(struct rangelock *lock, vm_ooffset_t start,
 	    vm_ooffset_t end);
 void	*rangelock_trywlock(struct rangelock *lock, vm_ooffset_t start,
 	    vm_ooffset_t end);
-void	rangelock_entry_free(struct rl_q_entry *e);
 void	rangelock_may_recurse(struct rangelock *lock);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	_rangelock_cookie_assert(void *cookie, int what, const char *file,