path: root/sys/kern/subr_sleepqueue.c
author      Jeff Roberson <jeff@FreeBSD.org>  2019-12-15 21:26:50 +0000
committer   Jeff Roberson <jeff@FreeBSD.org>  2019-12-15 21:26:50 +0000
commit      686bcb5c14aba6e67524be84e125bfdd3514db9e (patch)
tree        b10f0aa09f2e058a51defcaf01c077e1f19351fa  /sys/kern/subr_sleepqueue.c
parent      1223b40ebaf44102da51bedbd20f79829177982e (diff)
download    src-686bcb5c14aba6e67524be84e125bfdd3514db9e.tar.gz
            src-686bcb5c14aba6e67524be84e125bfdd3514db9e.zip
schedlock 4/4
Don't hold the scheduler lock while doing context switches. Instead we unlock after selecting the new thread and switch within a spinlock section, leaving interrupts and preemption disabled to prevent local concurrency. This means that mi_switch() is entered with the thread locked but returns without. This dramatically simplifies scheduler locking because we will not hold the schedlock while spinning on a blocked lock in switch.

This change has not been made to 4BSD, but in principle it would be more straightforward there.

Discussed with:         markj
Reviewed by:            kib
Tested by:              pho
Differential Revision:  https://reviews.freebsd.org/D22778
Notes:
    svn path=/head/; revision=355784
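
As a minimal illustration of the contract change described in the commit message (a sketch only, not a verbatim excerpt from the tree; td stands for curthread at a generic voluntary-switch site): callers still enter mi_switch() with the thread lock held, but after this change mi_switch() releases that lock before returning, so the trailing thread_unlock() at each call site disappears, as the hunks below show.

    /* Before this commit: mi_switch() returned with the thread still locked. */
    thread_lock(td);
    TD_SET_SLEEPING(td);
    mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
    thread_unlock(td);              /* caller dropped the lock afterwards */

    /* After this commit: mi_switch() consumes the thread lock. */
    thread_lock(td);
    TD_SET_SLEEPING(td);
    mi_switch(SW_VOL | SWT_SLEEPQ); /* returns with td unlocked */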
Diffstat (limited to 'sys/kern/subr_sleepqueue.c')
-rw-r--r--  sys/kern/subr_sleepqueue.c  14
1 file changed, 7 insertions, 7 deletions
diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c
index 6e7410c4f46f..bd5150c1134b 100644
--- a/sys/kern/subr_sleepqueue.c
+++ b/sys/kern/subr_sleepqueue.c
@@ -546,8 +546,10 @@ out:
sq = sleepq_lookup(wchan);
sleepq_remove_thread(sq, td);
}
- mtx_unlock_spin(&sc->sc_lock);
MPASS(td->td_lock != &sc->sc_lock);
+ mtx_unlock_spin(&sc->sc_lock);
+ thread_unlock(td);
+
return (ret);
}
@@ -574,6 +576,7 @@ sleepq_switch(void *wchan, int pri)
*/
if (td->td_sleepqueue != NULL) {
mtx_unlock_spin(&sc->sc_lock);
+ thread_unlock(td);
return;
}
@@ -605,6 +608,7 @@ sleepq_switch(void *wchan, int pri)
sq = sleepq_lookup(wchan);
sleepq_remove_thread(sq, td);
mtx_unlock_spin(&sc->sc_lock);
+ thread_unlock(td);
return;
}
#ifdef SLEEPQUEUE_PROFILING
@@ -616,7 +620,7 @@ sleepq_switch(void *wchan, int pri)
thread_lock_set(td, &sc->sc_lock);
SDT_PROBE0(sched, , , sleep);
TD_SET_SLEEPING(td);
- mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
+ mi_switch(SW_VOL | SWT_SLEEPQ);
KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
(void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
@@ -668,7 +672,6 @@ sleepq_wait(void *wchan, int pri)
MPASS(!(td->td_flags & TDF_SINTR));
thread_lock(td);
sleepq_switch(wchan, pri);
- thread_unlock(td);
}
/*
@@ -681,7 +684,6 @@ sleepq_wait_sig(void *wchan, int pri)
int rcatch;
rcatch = sleepq_catch_signals(wchan, pri);
- thread_unlock(curthread);
if (rcatch)
return (rcatch);
return (sleepq_check_signals());
@@ -698,9 +700,9 @@ sleepq_timedwait(void *wchan, int pri)
td = curthread;
MPASS(!(td->td_flags & TDF_SINTR));
+
thread_lock(td);
sleepq_switch(wchan, pri);
- thread_unlock(td);
return (sleepq_check_timeout());
}
@@ -715,8 +717,6 @@ sleepq_timedwait_sig(void *wchan, int pri)
int rcatch, rvalt, rvals;
rcatch = sleepq_catch_signals(wchan, pri);
- thread_unlock(curthread);
-
/* We must always call check_timeout() to clear sleeptimo. */
rvalt = sleepq_check_timeout();
rvals = sleepq_check_signals();
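
For reference, this is roughly how sleepq_wait() reads once the patch is applied (reconstructed from the -668,7 hunk above; comments elided):

    void
    sleepq_wait(void *wchan, int pri)
    {
            struct thread *td;

            td = curthread;
            MPASS(!(td->td_flags & TDF_SINTR));
            thread_lock(td);
            /* sleepq_switch() now returns with the thread lock released. */
            sleepq_switch(wchan, pri);
    }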