author    Julian Elischer <julian@FreeBSD.org>  2002-07-14 03:43:33 +0000
committer Julian Elischer <julian@FreeBSD.org>  2002-07-14 03:43:33 +0000
commit    c3b98db091b192a2e4333df74ea6da2a6ea90d26 (patch)
tree      aafccf41b7ab2599852d32b259154da2899a2107 /sys/kern
parent    010b4b09f11486a361033911561ab4eeac113c29 (diff)
download  src-c3b98db091b192a2e4333df74ea6da2a6ea90d26.tar.gz
          src-c3b98db091b192a2e4333df74ea6da2a6ea90d26.zip
Thinking about it, I came to the conclusion that the KSE states were
incorrectly formulated. The correct states should be:

    IDLE:   on the idle KSE list for that KSEG.
    RUNQ:   linked onto the system run queue.
    THREAD: attached to a thread and slaved to whatever state the
            thread is in.

This means that most of the places where we were adjusting KSE state can
go away, as the KSE only moves around because its thread does. The only
places we still need to adjust the KSE state are the transitions to and
from the idle list and the run queue.

Reviewed by: jhb@freebsd.org
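To make the model concrete, here is a minimal C sketch of the three
states and their transitions. The KES_* names match the constants used
in the diff below, but the enum itself is illustrative rather than the
actual sys/proc.h declaration (which also carries KES_UNQUEUED, among
others), and the transition notes paraphrase what the patch does rather
than quote it.

/*
 * Illustrative sketch only -- not the real sys/proc.h declaration.
 * The three KSE states named in the commit message.
 */
enum kse_state_sketch {
	KES_IDLE,	/* on the owning KSEG's idle KSE list (kg_iq) */
	KES_ONRUNQ,	/* linked onto the system run queue */
	KES_THREAD	/* attached to a thread; follows that thread's state */
};

/*
 * Under the new model these are the only transitions that touch
 * ke_state, all on the idle-list and run-queue boundaries:
 *
 *   KES_IDLE   -> KES_THREAD   setrunqueue() takes a KSE off kg_iq
 *   KES_THREAD -> KES_ONRUNQ   runq_add() queues the KSE (the
 *                              assignment is outside the hunks shown)
 *   KES_ONRUNQ -> KES_THREAD   runq_choose() / runq_remove()
 *   KES_THREAD -> KES_IDLE     kse_reassign() finds no runnable thread
 */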
Notes: svn path=/head/; revision=99942
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c   |   2
-rw-r--r--  sys/kern/kern_fork.c   |   4
-rw-r--r--  sys/kern/kern_idle.c   |   6
-rw-r--r--  sys/kern/kern_kse.c    |   3
-rw-r--r--  sys/kern/kern_proc.c   |   2
-rw-r--r--  sys/kern/kern_switch.c | 141
-rw-r--r--  sys/kern/kern_synch.c  |  17
-rw-r--r--  sys/kern/kern_thread.c |   3
8 files changed, 17 insertions(+), 161 deletions(-)
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index da1b843845f8..6817bdc87a93 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -342,7 +342,7 @@ proc0_init(void *dummy __unused)
td->td_base_pri = PUSER;
td->td_kse = ke; /* XXXKSE */
ke->ke_oncpu = 0;
- ke->ke_state = KES_RUNNING;
+ ke->ke_state = KES_THREAD;
ke->ke_thread = td;
/* proc_linkup puts it in the idle queue, that's not what we want. */
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index d5fd23156663..77481cad484f 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -510,11 +510,10 @@ again:
/* Set up the thread as an active thread (as if runnable). */
TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
kg2->kg_idle_kses--;
- ke2->ke_state = KES_UNQUEUED;
+ ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
-KASSERT((ke2->ke_kgrlist.tqe_next != ke2), ("linked to self!"));
/* note.. XXXKSE no pcb or u-area yet */
@@ -835,7 +834,6 @@ fork_exit(callout, arg, frame)
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
p->p_state = PRS_NORMAL;
td->td_state = TDS_RUNNING; /* Already done in switch() on 386. */
- td->td_kse->ke_state = KES_RUNNING;
/*
* Finish setting up thread glue. We need to initialize
* the thread into a td_critnest=1 state. Some platforms
diff --git a/sys/kern/kern_idle.c b/sys/kern/kern_idle.c
index 306f2a57cdad..1c33d19d1862 100644
--- a/sys/kern/kern_idle.c
+++ b/sys/kern/kern_idle.c
@@ -62,8 +62,7 @@ idle_setup(void *dummy)
p->p_flag |= P_NOLOAD;
td = FIRST_THREAD_IN_PROC(p);
- td->td_state = TDS_RUNQ;
- td->td_kse->ke_state = KES_ONRUNQ;
+ td->td_state = TDS_UNQUEUED;
td->td_kse->ke_flags |= KEF_IDLEKSE;
#ifdef SMP
}
@@ -84,8 +83,6 @@ idle_proc(void *dummy)
td = curthread;
p = td->td_proc;
- td->td_state = TDS_RUNNING;
- td->td_kse->ke_state = KES_RUNNING;
for (;;) {
mtx_assert(&Giant, MA_NOTOWNED);
@@ -115,7 +112,6 @@ idle_proc(void *dummy)
mtx_lock_spin(&sched_lock);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
- td->td_kse->ke_state = KES_RUNNING;
mtx_unlock_spin(&sched_lock);
}
}
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index ee7a25245a74..8ae0df7c46de 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -327,9 +327,6 @@ thread_exit(void)
/* Reassign this thread's KSE. */
if (ke != NULL) {
-KASSERT((ke->ke_state == KES_RUNNING), ("zapping kse not running"));
-KASSERT((ke->ke_thread == td ), ("kse ke_thread mismatch against curthread"));
-KASSERT((ke->ke_thread->td_state == TDS_RUNNING), ("zapping thread not running"));
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 44bf2cea15db..3547196038cc 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -212,10 +212,8 @@ kse_link(struct kse *ke, struct ksegrp *kg)
{
struct proc *p = kg->kg_proc;
-KASSERT((ke->ke_state != KES_ONRUNQ), ("linking suspect kse on run queue"));
TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
kg->kg_kses++;
-KASSERT((ke->ke_state != KES_IDLE), ("already on idle queue"));
ke->ke_state = KES_IDLE;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index cd95243963df..4ae03a928950 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -155,7 +155,6 @@ choosethread(void)
} else {
/* Simulate runq_choose() having returned the idle thread */
td = PCPU_GET(idlethread);
- td->td_kse->ke_state = KES_RUNNING;
CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
}
td->td_state = TDS_RUNNING;
@@ -196,7 +195,6 @@ kse_reassign(struct kse *ke)
runq_add(&runq, ke);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
} else {
- KASSERT((ke->ke_state != KES_IDLE), ("kse already idle"));
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
@@ -239,7 +237,7 @@ remrunqueue(struct thread *td)
if ((td->td_flags & TDF_UNBOUND) == 0) {
/* Bring its kse with it, leave the thread attached */
runq_remove(&runq, ke);
- ke->ke_state = KES_UNQUEUED;
+ ke->ke_state = KES_THREAD;
return;
}
if (ke) {
@@ -286,8 +284,6 @@ remrunqueue(struct thread *td)
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}
-#if 1 /* use the first version */
-
void
setrunqueue(struct thread *td)
{
@@ -331,7 +327,7 @@ setrunqueue(struct thread *td)
*/
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- ke->ke_state = KES_UNQUEUED;
+ ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
@@ -345,8 +341,9 @@ setrunqueue(struct thread *td)
runq_remove(&runq, ke);
}
} else {
- KASSERT(ke->ke_thread == td, ("KSE/thread mismatch"));
- KASSERT(ke->ke_state != KES_IDLE, ("KSE unexpectedly idle"));
+ /*
+ * Temporarily disassociate so it looks like the other cases.
+ */
ke->ke_thread = NULL;
td->td_kse = NULL;
}
@@ -374,7 +371,7 @@ setrunqueue(struct thread *td)
if (tda == NULL) {
/*
* No pre-existing last assigned so whoever is first
- * gets the KSE we borught in.. (may be us)
+ * gets the KSE we brought in.. (maybe us)
*/
td2 = TAILQ_FIRST(&kg->kg_runq);
KASSERT((td2->td_kse == NULL),
@@ -404,121 +401,6 @@ setrunqueue(struct thread *td)
}
}
-#else
-
-void
-setrunqueue(struct thread *td)
-{
- struct kse *ke;
- struct ksegrp *kg;
- struct thread *td2;
-
- CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
- KASSERT((td->td_state != TDS_RUNQ), ("setrunqueue: bad thread state"));
- td->td_state = TDS_RUNQ;
- kg = td->td_ksegrp;
- kg->kg_runnable++;
- if ((td->td_flags & TDF_UNBOUND) == 0) {
- /*
- * Common path optimisation: Only one of everything
- * and the KSE is always already attached.
- * Totally ignore the ksegrp run queue.
- */
- runq_add(&runq, td->td_kse);
- return;
- }
- /*
- * First add the thread to the ksegrp's run queue at
- * the appropriate place.
- */
- TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
- if (td2->td_priority > td->td_priority) {
- TAILQ_INSERT_BEFORE(td2, td, td_runq);
- break;
- }
- }
- if (td2 == NULL) {
- /* We ran off the end of the TAILQ or it was empty. */
- TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
- }
-
- /*
- * The following could be achieved by simply doing:
- * td->td_kse = NULL; kse_reassign(ke);
- * but I felt that I'd try do it inline here.
- * All this work may not be worth it.
- */
- if ((ke = td->td_kse)) { /* XXXKSE */
- /*
- * We have a KSE already. See whether we can keep it
- * or if we need to give it to someone else.
- * Either way it will need to be inserted into
- * the runq. kse_reassign() will do this as will runq_add().
- */
- if ((kg->kg_last_assigned) &&
- (kg->kg_last_assigned->td_priority > td->td_priority)) {
- /*
- * We can definitly keep the KSE
- * as the "last assignead thread" has
- * less priority than we do.
- * The "last assigned" pointer stays the same.
- */
- runq_add(&runq, ke);
- return;
-
- }
- /*
- * Give it to the correct thread,
- * which may be (often is) us, but may not be.
- */
- td->td_kse = NULL;
- kse_reassign(ke);
- return;
- }
- /*
- * There are two cases where KSE adjustment is needed.
- * Usurpation of an already assigned KSE, and assignment
- * of a previously IDLE KSE.
- */
- if (kg->kg_idle_kses) {
- /*
- * If there are unassigned KSEs then we definitly
- * will be assigned one from the idle KSE list.
- * If we are the last, we should get the "last
- * assigned" pointer set to us as well.
- */
- ke = TAILQ_FIRST(&kg->kg_iq);
- TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
- ke->ke_state = KES_UNQUEUED;
- kg->kg_idle_kses--;
- ke->ke_thread = td;
- td->td_kse = ke;
- runq_add(&runq, ke);
- if (TAILQ_NEXT(td, td_runq) == NULL) {
- kg->kg_last_assigned = td;
- }
- } else if (kg->kg_last_assigned &&
- (kg->kg_last_assigned->td_priority > td->td_priority)) {
- /*
- * If there were none last-assigned, all KSEs
- * are actually out running as we speak.
- * If there was a last assigned, but we didn't see it,
- * we must be inserting before it, so take the KSE from
- * the last assigned, and back it up one entry. Then,
- * assign the KSE to the new thread and adjust its priority.
- */
- td2 = kg->kg_last_assigned;
- ke = td2->td_kse;
- kg->kg_last_assigned =
- TAILQ_PREV(td2, threadqueue, td_runq);
- td2->td_kse = NULL;
- td->td_kse = ke;
- ke->ke_thread = td;
- runq_readjust(&runq, ke);
- }
-}
-#endif
-
/************************************************************************
* Critical section marker functions *
************************************************************************/
@@ -634,14 +516,11 @@ runq_add(struct runq *rq, struct kse *ke)
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
- KASSERT((ke->ke_thread->td_kse != NULL), ("runq_add: No KSE on thread"));
- if (ke->ke_state == KES_ONRUNQ)
- return;
-#if defined(INVARIANTS) && defined(DIAGNOSTIC)
+ KASSERT((ke->ke_thread->td_kse != NULL),
+ ("runq_add: No KSE on thread"));
KASSERT(ke->ke_state != KES_ONRUNQ,
("runq_add: kse %p (%s) already in run queue", ke,
ke->ke_proc->p_comm));
-#endif
pri = ke->ke_thread->td_priority / RQ_PPQ;
ke->ke_rqindex = pri;
runq_setbit(rq, pri);
@@ -702,7 +581,7 @@ runq_choose(struct runq *rq)
runq_clrbit(rq, pri);
}
- ke->ke_state = KES_RUNNING;
+ ke->ke_state = KES_THREAD;
KASSERT((ke->ke_thread != NULL),
("runq_choose: No thread on KSE"));
KASSERT((ke->ke_thread->td_kse != NULL),
@@ -737,7 +616,7 @@ runq_remove(struct runq *rq, struct kse *ke)
CTR0(KTR_RUNQ, "runq_remove: empty");
runq_clrbit(rq, pri);
}
- ke->ke_state = KES_UNQUEUED;
+ ke->ke_state = KES_THREAD;
ke->ke_ksegrp->kg_runq_kses--;
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index d376910bcc43..d07f090d083f 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -282,8 +282,9 @@ schedcpu(arg)
* the kse slptimes are not touched in wakeup
* because the thread may not HAVE a KSE
*/
- if (ke->ke_state == KES_ONRUNQ ||
- ke->ke_state == KES_RUNNING) {
+ if ((ke->ke_state == KES_ONRUNQ) ||
+ ((ke->ke_state == KES_THREAD) &&
+ (ke->ke_thread->td_state == TDS_RUNNING))) {
ke->ke_slptime++;
} else {
ke->ke_slptime = 0;
@@ -442,8 +443,6 @@ msleep(ident, mtx, priority, wmesg, timo)
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
- KASSERT((td->td_kse != NULL), ("msleep: NULL KSE?"));
- KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse state?"));
WITNESS_SLEEP(0, &mtx->mtx_object);
KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
("sleeping without a mutex"));
@@ -470,19 +469,12 @@ msleep(ident, mtx, priority, wmesg, timo)
mtx_lock_spin(&sched_lock);
if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
/* Don't recurse here! */
- KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateX?"));
td->td_flags |= TDF_INMSLEEP;
thread_schedule_upcall(td, td->td_kse);
td->td_flags &= ~TDF_INMSLEEP;
- KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse stateY?"));
}
mtx_unlock_spin(&sched_lock);
}
- KASSERT((td->td_kse != NULL), ("msleep: NULL KSE2?"));
- KASSERT((td->td_kse->ke_state == KES_RUNNING),
- ("msleep: kse state2?"));
- KASSERT((td->td_kse->ke_thread == td),
- ("msleep: kse/thread mismatch?"));
}
mtx_lock_spin(&sched_lock);
if (cold || panicstr) {
@@ -797,7 +789,7 @@ mi_switch()
u_int sched_nest;
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
- KASSERT((ke->ke_state == KES_RUNNING), ("mi_switch: kse state?"));
+ KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
#ifdef INVARIANTS
if (td->td_state != TDS_MTX &&
td->td_state != TDS_RUNQ &&
@@ -884,7 +876,6 @@ mi_switch()
}
cpu_switch();
td->td_kse->ke_oncpu = PCPU_GET(cpuid);
- td->td_kse->ke_state = KES_RUNNING;
sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = (uintptr_t)td;
CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index ee7a25245a74..8ae0df7c46de 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -327,9 +327,6 @@ thread_exit(void)
/* Reassign this thread's KSE. */
if (ke != NULL) {
-KASSERT((ke->ke_state == KES_RUNNING), ("zapping kse not running"));
-KASSERT((ke->ke_thread == td ), ("kse ke_thread mismatch against curthread"));
-KASSERT((ke->ke_thread->td_state == TDS_RUNNING), ("zapping thread not running"));
ke->ke_thread = NULL;
td->td_kse = NULL;
ke->ke_state = KES_UNQUEUED;