path: root/sys/kern/sched_4bsd.c
author		Jeff Roberson <jeff@FreeBSD.org>	2019-12-15 21:11:15 +0000
committer	Jeff Roberson <jeff@FreeBSD.org>	2019-12-15 21:11:15 +0000
commit		61a74c5ccd65d1a00a96779f16eda8c41ff3a426 (patch)
tree		0325e01f4affe5d9ef25e68fae1a7cbd5d2ebde9 /sys/kern/sched_4bsd.c
parent		054802650063bea1cb817ef22a887c3116813ba9 (diff)
download	src-61a74c5ccd65d1a00a96779f16eda8c41ff3a426.tar.gz
		src-61a74c5ccd65d1a00a96779f16eda8c41ff3a426.zip
schedlock 1/4

Eliminate recursion from most thread_lock consumers.  Return from
sched_add() without the thread_lock held.  This eliminates unnecessary
atomics and lock word loads as well as reducing the hold time for
scheduler locks.  This will eventually allow for lockless remote adds.

Discussed with:	kib
Reviewed by:	jhb
Tested by:	pho
Differential Revision:	https://reviews.freebsd.org/D22626
Notes:
	svn path=/head/; revision=355779
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--	sys/kern/sched_4bsd.c	66
1 file changed, 41 insertions, 25 deletions
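
The change introduces a new thread_lock ownership rule for sched_add() and
sched_wakeup(): unless the caller passes SRQ_HOLDTD, the scheduler now drops
the thread lock itself before returning, and SRQ_HOLD lets a caller keep its
original lock held while td_lock is retargeted.  The caller-side sketch below
is illustrative only and is not part of this commit; the wrapper names are
invented, and it assumes a post-r355779 kernel in which td is otherwise in a
state that permits enqueueing.

	/*
	 * Hypothetical caller-side sketch of the new sched_add() contract.
	 * Not part of this commit; wrapper names are for illustration only.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/proc.h>
	#include <sys/sched.h>

	static void
	make_runnable_once(struct thread *td)
	{

		thread_lock(td);
		/*
		 * Without SRQ_HOLDTD, sched_add() now consumes the thread
		 * lock: it is dropped before sched_add() returns, so the
		 * caller must not call thread_unlock() afterwards.
		 */
		sched_add(td, SRQ_BORING);
	}

	static void
	make_runnable_keep_lock(struct thread *td)
	{

		thread_lock(td);
		/*
		 * SRQ_HOLDTD asks sched_add() to return with the (possibly
		 * retargeted) thread lock still held, preserving the old
		 * behaviour for callers that still need td locked afterwards.
		 */
		sched_add(td, SRQ_HOLDTD | SRQ_BORING);
		/* ... use td while it remains locked ... */
		thread_unlock(td);
	}

sched_wakeup() gains the same srqflags argument and simply forwards it to
sched_add(), so sleep-queue callers can make the same choice.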
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 3cfc76a1afd3..c558b9135749 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -846,7 +846,7 @@ sched_priority(struct thread *td, u_char prio)
td->td_priority = prio;
if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
sched_rem(td);
- sched_add(td, SRQ_BORING);
+ sched_add(td, SRQ_BORING | SRQ_HOLDTD);
}
}
@@ -980,25 +980,12 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
struct proc *p;
int preempted;
- tmtx = NULL;
+ tmtx = &sched_lock;
ts = td_get_sched(td);
p = td->td_proc;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- /*
- * Switch to the sched lock to fix things up and pick
- * a new thread.
- * Block the td_lock in order to avoid breaking the critical path.
- */
- if (td->td_lock != &sched_lock) {
- mtx_lock_spin(&sched_lock);
- tmtx = thread_lock_block(td);
- }
-
- if ((td->td_flags & TDF_NOLOAD) == 0)
- sched_load_rem();
-
td->td_lastcpu = td->td_oncpu;
preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
(flags & SW_PREEMPT) != 0;
@@ -1021,10 +1008,25 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue. */
sched_add(td, preempted ?
- SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
- SRQ_OURSELF|SRQ_YIELDING);
+ SRQ_HOLDTD|SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
+ SRQ_HOLDTD|SRQ_OURSELF|SRQ_YIELDING);
}
}
+
+ /*
+ * Switch to the sched lock to fix things up and pick
+ * a new thread. Block the td_lock in order to avoid
+ * breaking the critical path.
+ */
+ if (td->td_lock != &sched_lock) {
+ mtx_lock_spin(&sched_lock);
+ tmtx = thread_lock_block(td);
+ mtx_unlock_spin(tmtx);
+ }
+
+ if ((td->td_flags & TDF_NOLOAD) == 0)
+ sched_load_rem();
+
if (newtd) {
/*
* The thread we are about to run needs to be counted
@@ -1042,9 +1044,10 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
sched_load_add();
} else {
newtd = choosethread();
- MPASS(newtd->td_lock == &sched_lock);
}
+ MPASS(newtd->td_lock == &sched_lock);
+
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -1075,7 +1078,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
(*dtrace_vtime_switch_func)(newtd);
#endif
- cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
+ cpu_switch(td, newtd, tmtx);
lock_profile_obtain_lock_success(&sched_lock.lock_object,
0, 0, __FILE__, __LINE__);
/*
@@ -1100,8 +1103,10 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
- } else
+ } else {
+ td->td_lock = &sched_lock;
SDT_PROBE0(sched, , , remain__cpu);
+ }
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
"prio:%d", td->td_priority);
@@ -1116,7 +1121,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
}
void
-sched_wakeup(struct thread *td)
+sched_wakeup(struct thread *td, int srqflags)
{
struct td_sched *ts;
@@ -1130,7 +1135,7 @@ sched_wakeup(struct thread *td)
td->td_slptick = 0;
ts->ts_slptime = 0;
ts->ts_slice = sched_slice;
- sched_add(td, SRQ_BORING);
+ sched_add(td, srqflags);
}
#ifdef SMP
@@ -1316,7 +1321,11 @@ sched_add(struct thread *td, int flags)
*/
if (td->td_lock != &sched_lock) {
mtx_lock_spin(&sched_lock);
- thread_lock_set(td, &sched_lock);
+ if ((flags & SRQ_HOLD) != 0)
+ td->td_lock = &sched_lock;
+ else
+ thread_lock_set(td, &sched_lock);
+
}
TD_SET_RUNQ(td);
@@ -1380,6 +1389,8 @@ sched_add(struct thread *td, int flags)
maybe_resched(td);
}
}
+ if ((flags & SRQ_HOLDTD) == 0)
+ thread_unlock(td);
}
#else /* SMP */
{
@@ -1407,7 +1418,10 @@ sched_add(struct thread *td, int flags)
*/
if (td->td_lock != &sched_lock) {
mtx_lock_spin(&sched_lock);
- thread_lock_set(td, &sched_lock);
+ if ((flags & SRQ_HOLD) != 0)
+ td->td_lock = &sched_lock;
+ else
+ thread_lock_set(td, &sched_lock);
}
TD_SET_RUNQ(td);
CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
@@ -1418,6 +1432,8 @@ sched_add(struct thread *td, int flags)
runq_add(ts->ts_runq, td, flags);
if (!maybe_preempt(td))
maybe_resched(td);
+ if ((flags & SRQ_HOLDTD) == 0)
+ thread_unlock(td);
}
#endif /* SMP */
@@ -1776,7 +1792,7 @@ sched_affinity(struct thread *td)
/* Put this thread on a valid per-CPU runqueue. */
sched_rem(td);
- sched_add(td, SRQ_BORING);
+ sched_add(td, SRQ_HOLDTD | SRQ_BORING);
break;
case TDS_RUNNING:
/*
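
Inside sched_add() the two flags act at different points, as the hunks above
show: SRQ_HOLD changes how td_lock is retargeted to sched_lock on entry, while
SRQ_HOLDTD decides whether the thread lock is released on return.  The
following is a hypothetical condensation of just that locking logic, not the
committed function; runqueue selection, preemption, and the tracing and
assertion scaffolding are omitted, and it assumes the same headers and
sched_lock definition as sched_4bsd.c.

	/*
	 * Hypothetical condensation of the sched_add() lock handling above.
	 * Not the committed code: enqueueing, IPIs and preemption are elided.
	 */
	static void
	sched_add_lock_sketch(struct thread *td, int flags)
	{

		THREAD_LOCK_ASSERT(td, MA_OWNED);
		if (td->td_lock != &sched_lock) {
			mtx_lock_spin(&sched_lock);
			if ((flags & SRQ_HOLD) != 0) {
				/*
				 * Retarget td_lock only; the caller's
				 * original lock stays held for it to
				 * release later.
				 */
				td->td_lock = &sched_lock;
			} else {
				/*
				 * thread_lock_set() retargets td_lock and
				 * also drops the previously held lock.
				 */
				thread_lock_set(td, &sched_lock);
			}
		}
		TD_SET_RUNQ(td);

		/* ... place td on a run queue and notify a CPU ... */

		/*
		 * New default: the (now sched_lock) thread lock is released
		 * here; callers that still need td locked pass SRQ_HOLDTD.
		 */
		if ((flags & SRQ_HOLDTD) == 0)
			thread_unlock(td);
	}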