about summary refs log tree commit diff
path: root/sys/kern/sched_ule.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/sched_ule.c')
-rw-r--r--  sys/kern/sched_ule.c  35
1 files changed, 17 insertions, 18 deletions
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index b5b31ba2d330..319a38de6ef8 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -772,18 +772,12 @@ sched_pickcpu(void)
void
sched_prio(struct thread *td, u_char prio)
{
- struct kse *ke;
- struct runq *rq;
mtx_assert(&sched_lock, MA_OWNED);
- ke = td->td_kse;
- td->td_priority = prio;
-
if (TD_ON_RUNQ(td)) {
- rq = ke->ke_runq;
-
- runq_remove(rq, ke);
- runq_add(rq, ke);
+ adjustrunqueue(td, prio);
+ } else {
+ td->td_priority = prio;
}
}
@@ -802,15 +796,20 @@ sched_switchout(struct thread *td)
td->td_flags &= ~TDF_NEEDRESCHED;
if (TD_IS_RUNNING(td)) {
- /*
- * This queue is always correct except for idle threads which
- * have a higher priority due to priority propagation.
- */
- if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
- ke->ke_thread->td_priority > PRI_MIN_IDLE)
- ke->ke_runq = KSEQ_SELF()->ksq_curr;
- runq_add(ke->ke_runq, ke);
- /* setrunqueue(td); */
+ if (td->td_proc->p_flag & P_SA) {
+ kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
+ setrunqueue(td);
+ } else {
+ /*
+ * This queue is always correct except for idle threads which
+ * have a higher priority due to priority propagation.
+ */
+ if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
+ ke->ke_thread->td_priority > PRI_MIN_IDLE)
+ ke->ke_runq = KSEQ_SELF()->ksq_curr;
+ runq_add(ke->ke_runq, ke);
+ /* setrunqueue(td); */
+ }
return;
}
if (ke->ke_runq)