author     Mark Johnston <markj@FreeBSD.org>  2022-07-14 14:23:43 +0000
committer  Mark Johnston <markj@FreeBSD.org>  2022-07-14 14:27:58 +0000
commit     0927ff78147b4d00a75054bbad299946208e1e91 (patch)
tree       f2fa437e9597a4b4c7371ae8c29c999bfa8043c9
parent     6d3f74a14a83b867c273c6be2599da182a9b9ec7 (diff)
download   src-0927ff78147b4d00a75054bbad299946208e1e91.tar.gz
           src-0927ff78147b4d00a75054bbad299946208e1e91.zip
sched_ule: Enable preemption of curthread in the load balancer
The load balancer executes from statclock and periodically tries to move
threads among CPUs in order to balance load.  It may move a thread to the
current CPU (the load balancer always runs on CPU 0).  When it does so, it
may need to schedule preemption of the interrupted thread.  Use
sched_setpreempt() to do so, same as sched_add().

PR:		264867
Reviewed by:	mav, kib, jhb
MFC after:	1 month
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D35744
-rw-r--r--	sys/kern/sched_ule.c	26
1 file changed, 14 insertions, 12 deletions
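
The decision the patch adds is easiest to see outside the kernel.  Below is a
standalone, simplified C sketch (not FreeBSD source) of the behavior the commit
message describes: once the balancer has moved a thread onto some CPU's run
queue, a remote CPU is notified so it reschedules (an IPI via tdq_notify() in
the real code), while the balancer's own CPU instead records a preemption
request, as the new sched_setpreempt() call does.  The names used here
(struct runq_sim, notify_remote, mark_local_preempt, after_balance_move) are
hypothetical stand-ins, and lower numeric priority values are better, as in ULE.

/*
 * Standalone sketch of the remote-vs-local choice made after a balancer move.
 */
#include <stdbool.h>
#include <stdio.h>

struct runq_sim {
	int	cpu_id;		/* CPU owning this run queue */
	int	lowest_pri;	/* best (numerically lowest) priority queued */
	bool	need_preempt;	/* set instead of switching immediately */
};

static void
notify_remote(struct runq_sim *rq)
{
	/* Stands in for tdq_notify(): poke the remote CPU with an IPI. */
	printf("cpu%d: would send an IPI (queued pri %d)\n",
	    rq->cpu_id, rq->lowest_pri);
}

static void
mark_local_preempt(struct runq_sim *rq)
{
	/* Stands in for sched_setpreempt(): preempt curthread once it is safe. */
	rq->need_preempt = true;
}

/* The balancer just moved a thread onto "target"; we are running on "self_cpu". */
static void
after_balance_move(struct runq_sim *target, int self_cpu)
{
	if (target->cpu_id != self_cpu)
		notify_remote(target);
	else
		mark_local_preempt(target);
}

int
main(void)
{
	struct runq_sim remote = { .cpu_id = 3, .lowest_pri = 100 };
	struct runq_sim local = { .cpu_id = 0, .lowest_pri = 80 };

	after_balance_move(&remote, 0);	/* remote target: IPI path */
	after_balance_move(&local, 0);	/* local target: the new preemption path */
	printf("cpu0 need_preempt=%d\n", local.need_preempt);
	return (0);
}
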
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 2652973f9b99..43991ca15c57 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -307,6 +307,7 @@ static struct tdq tdq_cpu;
#define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define TDQ_LOCKPTR(t) ((struct mtx *)(&(t)->tdq_lock))
+static void sched_setpreempt(int);
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
@@ -949,13 +950,15 @@ sched_balance_pair(struct tdq *high, struct tdq *low)
lowpri = tdq_move(high, low);
if (lowpri != -1) {
/*
- * In case the target isn't the current cpu notify it of
+ * In case the target isn't the current CPU notify it of
* the new load, possibly sending an IPI to force it to
- * reschedule.
+ * reschedule. Otherwise maybe schedule a preemption.
*/
cpu = TDQ_ID(low);
if (cpu != PCPU_GET(cpuid))
tdq_notify(low, lowpri);
+ else
+ sched_setpreempt(low->tdq_lowpri);
ret = true;
}
}
@@ -2630,20 +2633,19 @@ sched_choose(void)
}
/*
- * Set owepreempt if necessary. Preemption never happens directly in ULE,
- * we always request it once we exit a critical section.
+ * Set owepreempt if the currently running thread has lower priority than "pri".
+ * Preemption never happens directly in ULE, we always request it once we exit a
+ * critical section.
*/
-static inline void
-sched_setpreempt(struct thread *td)
+static void
+sched_setpreempt(int pri)
{
struct thread *ctd;
int cpri;
- int pri;
-
- THREAD_LOCK_ASSERT(curthread, MA_OWNED);
ctd = curthread;
- pri = td->td_priority;
+ THREAD_LOCK_ASSERT(ctd, MA_OWNED);
+
cpri = ctd->td_priority;
if (pri < cpri)
ctd->td_flags |= TDF_NEEDRESCHED;
@@ -2720,7 +2722,7 @@ sched_add(struct thread *td, int flags)
if (cpu != PCPU_GET(cpuid))
tdq_notify(tdq, lowpri);
else if (!(flags & SRQ_YIELDING))
- sched_setpreempt(td);
+ sched_setpreempt(td->td_priority);
#else
tdq = TDQ_SELF();
/*
@@ -2736,7 +2738,7 @@ sched_add(struct thread *td, int flags)
}
(void)tdq_add(tdq, td, flags);
if (!(flags & SRQ_YIELDING))
- sched_setpreempt(td);
+ sched_setpreempt(td->td_priority);
#endif
if (!(flags & SRQ_HOLDTD))
thread_unlock(td);
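
As the comment in the second hunk notes, preemption never happens directly in
ULE: a request is only recorded, and the switch is performed once the
interrupted code leaves its critical section.  The following standalone sketch
(again not kernel source; struct cpu_state, request_preemption,
critical_enter, and critical_exit are hypothetical simplifications of the
td_owepreempt handling) illustrates that deferral.

/*
 * Standalone sketch of deferred preemption: a higher-priority arrival only
 * sets a flag, and the switch is requested when the critical section ends.
 * Lower numeric values mean higher priority, as in ULE.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	int	cur_priority;	/* priority of the running thread */
	int	crit_nesting;	/* critical_enter()/critical_exit() depth */
	bool	owepreempt;	/* preemption requested but deferred */
};

/* Called when a runnable thread with priority "new_priority" lands here. */
static void
request_preemption(struct cpu_state *cpu, int new_priority)
{
	if (new_priority < cpu->cur_priority)	/* higher priority arrived */
		cpu->owepreempt = true;		/* defer: never switch here */
}

static void
critical_enter(struct cpu_state *cpu)
{
	cpu->crit_nesting++;
}

static void
critical_exit(struct cpu_state *cpu)
{
	if (--cpu->crit_nesting == 0 && cpu->owepreempt) {
		cpu->owepreempt = false;
		printf("switching to the higher-priority thread\n");
	}
}

int
main(void)
{
	struct cpu_state cpu = { .cur_priority = 120 };

	critical_enter(&cpu);
	request_preemption(&cpu, 80);	/* e.g. the balancer moved a thread here */
	printf("owepreempt=%d (no switch inside the critical section)\n",
	    cpu.owepreempt);
	critical_exit(&cpu);		/* the deferred preemption happens now */
	return (0);
}
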