author     John Baldwin <jhb@FreeBSD.org>  2006-04-17 18:20:38 +0000
committer  John Baldwin <jhb@FreeBSD.org>  2006-04-17 18:20:38 +0000
commit     0f180a7cce5bed801060557dd5bdbad64318d3c6 (patch)
tree       6e1e68443c878ae084bc35e7e36a8edf68ad3352
parent     00d02f943bbcd6ec85092821e9ee4d8adca83c83 (diff)
Change msleep() and tsleep() to not alter the calling thread's priority
if the specified priority is zero. This avoids a race where the calling
thread could read a snapshot of its current priority, a different thread
could then change the first thread's priority, and the original thread
would then call sched_prio() inside msleep(), undoing the change made by
the second thread. I used a priority of zero since no thread that calls
msleep() or tsleep() should be specifying a priority of zero anyway. The
various places that passed 'curthread->td_priority' or some variant as
the priority now pass 0.
Notes:
    svn path=/head/; revision=157815
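
For context, a minimal sketch of the caller-side pattern this commit removes and the one it introduces; the sleep channel, wait message, and timeout below are illustrative only, and the real call sites appear in the diff that follows.

	/*
	 * Old, racy pattern: the snapshot of td_priority can be stale by
	 * the time tsleep() re-applies it via sched_prio(), clobbering a
	 * priority change made by another thread in the meantime.
	 */
	u_char pri;

	mtx_lock_spin(&sched_lock);
	pri = curthread->td_priority;
	mtx_unlock_spin(&sched_lock);
	/* Another thread may call sched_prio() on this thread here. */
	(void) tsleep(&some_channel, pri, "wait", hz);

	/*
	 * New pattern: passing a priority of 0 tells msleep()/tsleep() to
	 * leave the calling thread's priority untouched.
	 */
	(void) tsleep(&some_channel, 0, "wait", hz);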
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c        |  9
-rw-r--r--  sys/dev/random/randomdev_soft.c  |  6
-rw-r--r--  sys/kern/kern_intr.c             |  8
-rw-r--r--  sys/kern/kern_poll.c             |  4
-rw-r--r--  sys/kern/kern_synch.c            |  8
-rw-r--r--  sys/kern/kern_thr.c              |  4
-rw-r--r--  sys/kern/kern_umtx.c             | 10
-rw-r--r--  sys/kern/sched_4bsd.c            |  2
-rw-r--r--  sys/kern/subr_taskqueue.c        |  2
-rw-r--r--  sys/kern/uipc_mqueue.c           |  4
-rw-r--r--  sys/vm/vm_zeroidle.c             |  4
11 files changed, 23 insertions(+), 38 deletions(-)
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 1ea0e680f3f5..579b6fd1a98b 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -646,15 +646,8 @@ pmc_select_cpu(int cpu)
static void
pmc_force_context_switch(void)
{
- u_char curpri;
-
- mtx_lock_spin(&sched_lock);
- curpri = curthread->td_priority;
- mtx_unlock_spin(&sched_lock);
-
- (void) tsleep((void *) pmc_force_context_switch, curpri,
- "pmcctx", 1);
+ (void) tsleep((void *) pmc_force_context_switch, 0, "pmcctx", 1);
}
/*
diff --git a/sys/dev/random/randomdev_soft.c b/sys/dev/random/randomdev_soft.c
index cd5e2d9bc0ba..159d6cebc54c 100644
--- a/sys/dev/random/randomdev_soft.c
+++ b/sys/dev/random/randomdev_soft.c
@@ -211,8 +211,7 @@ random_yarrow_deinit(void)
* Command the hash/reseed thread to end and wait for it to finish
*/
random_kthread_control = -1;
- tsleep((void *)&random_kthread_control, curthread->td_priority, "term",
- 0);
+ tsleep((void *)&random_kthread_control, 0, "term", 0);
/* Destroy the harvest fifos */
while (!STAILQ_EMPTY(&emptyfifo.head)) {
@@ -285,8 +284,7 @@ random_kthread(void *arg __unused)
/* Found nothing, so don't belabour the issue */
if (!active)
- tsleep(&harvestfifo, curthread->td_priority, "-",
- hz / 10);
+ tsleep(&harvestfifo, 0, "-", hz / 10);
}
diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c
index fae8e976e9c7..042a0ec1b639 100644
--- a/sys/kern/kern_intr.c
+++ b/sys/kern/kern_intr.c
@@ -373,8 +373,7 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
/* Create a thread if we need one. */
while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
if (ie->ie_flags & IE_ADDING_THREAD)
- msleep(ie, &ie->ie_lock, curthread->td_priority,
- "ithread", 0);
+ msleep(ie, &ie->ie_lock, 0, "ithread", 0);
else {
ie->ie_flags |= IE_ADDING_THREAD;
mtx_unlock(&ie->ie_lock);
@@ -460,8 +459,7 @@ ok:
TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
mtx_unlock_spin(&sched_lock);
while (handler->ih_flags & IH_DEAD)
- msleep(handler, &ie->ie_lock, curthread->td_priority, "iev_rmh",
- 0);
+ msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
intr_event_update(ie);
#ifdef notyet
/*
@@ -685,7 +683,7 @@ ithread_execute_handlers(struct proc *p, struct intr_event *ie)
ie->ie_name);
ie->ie_warned = 1;
}
- tsleep(&ie->ie_count, curthread->td_priority, "istorm", 1);
+ tsleep(&ie->ie_count, 0, "istorm", 1);
} else
ie->ie_count++;
diff --git a/sys/kern/kern_poll.c b/sys/kern/kern_poll.c
index 992116fdf8f1..dadd7a354f72 100644
--- a/sys/kern/kern_poll.c
+++ b/sys/kern/kern_poll.c
@@ -577,13 +577,11 @@ poll_idle(void)
{
struct thread *td = curthread;
struct rtprio rtp;
- int pri;
rtp.prio = RTP_PRIO_MAX; /* lowest priority */
rtp.type = RTP_PRIO_IDLE;
mtx_lock_spin(&sched_lock);
rtp_to_pri(&rtp, td->td_ksegrp);
- pri = td->td_priority;
mtx_unlock_spin(&sched_lock);
for (;;) {
@@ -595,7 +593,7 @@ poll_idle(void)
mtx_unlock_spin(&sched_lock);
} else {
idlepoll_sleeping = 1;
- tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
+ tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
}
}
}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index d2c6accb24c0..d84c64fe0020 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -195,9 +195,11 @@ msleep(ident, mtx, priority, wmesg, timo)
/*
* Adjust this thread's priority.
*/
- mtx_lock_spin(&sched_lock);
- sched_prio(td, priority & PRIMASK);
- mtx_unlock_spin(&sched_lock);
+ if ((priority & PRIMASK) != 0) {
+ mtx_lock_spin(&sched_lock);
+ sched_prio(td, priority & PRIMASK);
+ mtx_unlock_spin(&sched_lock);
+ }
if (timo && catch)
rval = sleepq_timedwait_sig(ident);
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index f58916012869..d0f786ee7f1d 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -368,8 +368,8 @@ thr_suspend(struct thread *td, struct thr_suspend_args *uap)
}
PROC_LOCK(td->td_proc);
if ((td->td_flags & TDF_THRWAKEUP) == 0)
- error = msleep((void *)td, &td->td_proc->p_mtx,
- td->td_priority | PCATCH, "lthr", hz);
+ error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr",
+ hz);
if (td->td_flags & TDF_THRWAKEUP) {
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_THRWAKEUP;
diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
index 672add361d75..a4ee457a33e4 100644
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -168,7 +168,7 @@ umtxq_busy(struct umtx_key *key)
while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
umtxq_chains[chain].uc_flags |= UCF_WANT;
msleep(&umtxq_chains[chain], umtxq_mtx(chain),
- curthread->td_priority, "umtxq_busy", 0);
+ 0, "umtxq_busy", 0);
}
umtxq_chains[chain].uc_flags |= UCF_BUSY;
}
@@ -424,8 +424,7 @@ _do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
*/
umtxq_lock(&uq->uq_key);
if (old == owner && (td->td_flags & TDF_UMTXQ)) {
- error = umtxq_sleep(td, &uq->uq_key,
- td->td_priority | PCATCH,
+ error = umtxq_sleep(td, &uq->uq_key, PCATCH,
"umtx", timo);
}
umtxq_busy(&uq->uq_key);
@@ -547,7 +546,7 @@ do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
umtxq_lock(&uq->uq_key);
if (td->td_flags & TDF_UMTXQ)
error = umtxq_sleep(td, &uq->uq_key,
- td->td_priority | PCATCH, "ucond", 0);
+ PCATCH, "ucond", 0);
if (!(td->td_flags & TDF_UMTXQ))
error = 0;
else
@@ -560,8 +559,7 @@ do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
for (;;) {
umtxq_lock(&uq->uq_key);
if (td->td_flags & TDF_UMTXQ) {
- error = umtxq_sleep(td, &uq->uq_key,
- td->td_priority | PCATCH,
+ error = umtxq_sleep(td, &uq->uq_key, PCATCH,
"ucond", tvtohz(&tv));
}
if (!(td->td_flags & TDF_UMTXQ)) {
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 7ba4285f80e8..3733dec014de 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -543,7 +543,7 @@ schedcpu_thread(void)
for (;;) {
schedcpu();
- tsleep(&nowake, curthread->td_priority, "-", hz);
+ tsleep(&nowake, 0, "-", hz);
}
}
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 00a842db0d72..bd17515d4401 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -365,7 +365,7 @@ taskqueue_thread_loop(void *arg)
TQ_LOCK(tq);
do {
taskqueue_run(tq);
- TQ_SLEEP(tq, tq, &tq->tq_mutex, curthread->td_priority, "-", 0);
+ TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
} while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0);
/* rendezvous with thread that asked us to terminate */
diff --git a/sys/kern/uipc_mqueue.c b/sys/kern/uipc_mqueue.c
index a6587d5e40df..e95f80063fec 100644
--- a/sys/kern/uipc_mqueue.c
+++ b/sys/kern/uipc_mqueue.c
@@ -1657,7 +1657,7 @@ _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
}
mq->mq_senders++;
error = msleep(&mq->mq_senders, &mq->mq_mutex,
- curthread->td_priority | PCATCH, "mqsend", timo);
+ PCATCH, "mqsend", timo);
mq->mq_senders--;
if (error == EAGAIN)
error = ETIMEDOUT;
@@ -1809,7 +1809,7 @@ _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
}
mq->mq_receivers++;
error = msleep(&mq->mq_receivers, &mq->mq_mutex,
- curthread->td_priority | PCATCH, "mqrecv", timo);
+ PCATCH, "mqrecv", timo);
mq->mq_receivers--;
if (error == EAGAIN)
error = ETIMEDOUT;
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 68babd0df138..683990f467e0 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -140,9 +140,7 @@ vm_page_zero_idle_wakeup(void)
static void
vm_pagezero(void __unused *arg)
{
- struct thread *td;
- td = curthread;
idlezero_enable = idlezero_enable_default;
for (;;) {
@@ -159,7 +157,7 @@ vm_pagezero(void __unused *arg)
vm_page_lock_queues();
wakeup_needed = TRUE;
msleep(&zero_state, &vm_page_queue_mtx,
- PDROP | td->td_priority, "pgzero", hz * 300);
+ PDROP, "pgzero", hz * 300);
}
}
}