author     Jeff Roberson <jeff@FreeBSD.org>  2019-12-15 21:26:50 +0000
committer  Jeff Roberson <jeff@FreeBSD.org>  2019-12-15 21:26:50 +0000
commit     686bcb5c14aba6e67524be84e125bfdd3514db9e (patch)
tree       b10f0aa09f2e058a51defcaf01c077e1f19351fa  /sys/kern/sched_4bsd.c
parent     1223b40ebaf44102da51bedbd20f79829177982e (diff)
download   src-686bcb5c14aba6e67524be84e125bfdd3514db9e.tar.gz
           src-686bcb5c14aba6e67524be84e125bfdd3514db9e.zip
schedlock 4/4
Don't hold the scheduler lock while doing context switches. Instead we unlock
after selecting the new thread and switch within a spinlock section, leaving
interrupts and preemption disabled to prevent local concurrency. This means
that mi_switch() is entered with the thread locked but returns without. This
dramatically simplifies scheduler locking because we will not hold the
schedlock while spinning on a blocked lock in switch.

This change has not been made to 4BSD but in principle it would be more
straightforward.

Discussed with:	markj
Reviewed by:	kib
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D22778
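For reference, a minimal caller-side sketch of the new mi_switch() contract
described above (entered with the thread lock held, returned with it already
released); the helper name yield_cpu() is hypothetical and simply mirrors the
sched_relinquish() hunk in the diff below:

	/*
	 * Hypothetical voluntary-yield helper illustrating the post-change
	 * locking protocol: the caller acquires the thread lock, mi_switch()
	 * consumes it and returns unlocked, so no thread_unlock() follows.
	 */
	static void
	yield_cpu(struct thread *td)
	{

		thread_lock(td);			/* lock before switching */
		mi_switch(SW_VOL | SWT_RELINQUISH);	/* returns with td unlocked */
		/* The thread lock is no longer held here; nothing to release. */
	}

Callers that still need the lock after the switch, such as sched_bind() in the
diff below, must re-acquire it themselves with thread_lock(td).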
Notes:
	svn path=/head/; revision=355784
Diffstat (limited to 'sys/kern/sched_4bsd.c')
 -rw-r--r--  sys/kern/sched_4bsd.c  |  47
 1 file changed, 15 insertions, 32 deletions
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index c558b9135749..9ac26355624d 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -671,7 +671,7 @@ schedinit(void)
*/
thread0.td_lock = &sched_lock;
td_get_sched(&thread0)->ts_slice = sched_slice;
- mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
+ mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN);
}
int
@@ -973,8 +973,9 @@ sched_sleep(struct thread *td, int pri)
}
void
-sched_switch(struct thread *td, struct thread *newtd, int flags)
+sched_switch(struct thread *td, int flags)
{
+ struct thread *newtd;
struct mtx *tmtx;
struct td_sched *ts;
struct proc *p;
@@ -1027,25 +1028,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if ((td->td_flags & TDF_NOLOAD) == 0)
sched_load_rem();
- if (newtd) {
- /*
- * The thread we are about to run needs to be counted
- * as if it had been added to the run queue and selected.
- * It came from:
- * * A preemption
- * * An upcall
- * * A followon
- */
- KASSERT((newtd->td_inhibitors == 0),
- ("trying to run inhibited thread"));
- newtd->td_flags |= TDF_DIDRUN;
- TD_SET_RUNNING(newtd);
- if ((newtd->td_flags & TDF_NOLOAD) == 0)
- sched_load_add();
- } else {
- newtd = choosethread();
- }
-
+ newtd = choosethread();
MPASS(newtd->td_lock == &sched_lock);
#if (KTR_COMPILE & KTR_SCHED) != 0
@@ -1117,7 +1100,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
#endif
sched_lock.mtx_lock = (uintptr_t)td;
td->td_oncpu = PCPU_GET(cpuid);
- MPASS(td->td_lock == &sched_lock);
+ spinlock_enter();
+ mtx_unlock_spin(&sched_lock);
}
void
@@ -1517,12 +1501,12 @@ sched_preempt(struct thread *td)
{
SDT_PROBE2(sched, , , surrender, td, td->td_proc);
- thread_lock(td);
- if (td->td_critnest > 1)
+ if (td->td_critnest > 1) {
td->td_owepreempt = 1;
- else
- mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
- thread_unlock(td);
+ } else {
+ thread_lock(td);
+ mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT);
+ }
}
void
@@ -1551,7 +1535,8 @@ sched_bind(struct thread *td, int cpu)
if (PCPU_GET(cpuid) == cpu)
return;
- mi_switch(SW_VOL, NULL);
+ mi_switch(SW_VOL);
+ thread_lock(td);
#endif
}
@@ -1574,8 +1559,7 @@ void
sched_relinquish(struct thread *td)
{
thread_lock(td);
- mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
- thread_unlock(td);
+ mi_switch(SW_VOL | SWT_RELINQUISH);
}
int
@@ -1666,8 +1650,7 @@ sched_idletd(void *dummy)
}
mtx_lock_spin(&sched_lock);
- mi_switch(SW_VOL | SWT_IDLE, NULL);
- mtx_unlock_spin(&sched_lock);
+ mi_switch(SW_VOL | SWT_IDLE);
}
}