author     Attilio Rao <attilio@FreeBSD.org>  2010-02-09 14:56:10 +0000
committer  Attilio Rao <attilio@FreeBSD.org>  2010-02-09 14:56:10 +0000
commit     2b0ebd550afb38a37b5eb77ab4067e071bd965ff (patch)
tree       68b395e28b09e7097d442f90ffac6fe0b4a34127
parent     eb3f7c0c720e63fa2490e08d907365908d971dd5 (diff)
MFC r202889, r202940:
- Fix a race in sched_switch() of sched_4bsd: block the td_lock when
  explicitly acquiring sched_lock, in order to prevent races with other
  td_lock contenders.
- Merge ULE's internal function thread_block_switch() into the global
  thread_lock_block() and make the former's semantics the default for
  thread_lock_block().
- Split out an invariant in order to have better checks.

Tested by:   Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
Approved by: re (kib)
Notes:
    svn path=/stable/7/; revision=203704
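
For context, the heart of the change is that thread_lock_block() no longer
enters a spin section itself; callers that need the spin count bumped (the
ULE paths in the diff) now wrap the call in spinlock_enter()/spinlock_exit().
A minimal sketch of the pair after this change, reconstructed from the
kern_mutex.c hunks below with editorial comments added (not a verbatim copy
of the file), looks roughly like this:

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	/* Park td_lock on blocked_lock; contenders keep spinning on it. */
	td->td_lock = &blocked_lock;
	/* Drop the old lock; managing the spin section is now the
	   caller's job. */
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	/* Publish the new lock with release semantics so contenders
	   spinning on blocked_lock pick it up consistently. */
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}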
-rw-r--r--  sys/kern/kern_mutex.c    2
-rw-r--r--  sys/kern/sched_4bsd.c   13
-rw-r--r--  sys/kern/sched_ule.c    27
3 files changed, 15 insertions, 27 deletions
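
The sched_4bsd race is closed by blocking td_lock instead of unlocking it
before taking sched_lock. A condensed sketch of the resulting sched_switch()
handoff, assembled from the sched_4bsd.c hunks below (most of the function
elided, comments editorial), is:

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct mtx *tmtx = NULL;

	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		/* Block rather than unlock td_lock, so other td_lock
		   contenders cannot slip in mid-switch. */
		tmtx = thread_lock_block(td);
	}

	/* ... load accounting, flag handling, newtd selection elided ... */

	/* Hand cpu_switch() the mutex that was actually blocked (or
	   td_lock when no blocking was needed); it becomes td's lock
	   again only once the switch-out has completed. */
	cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
}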
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 9f7f1c87bee0..94c8c2313ce0 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -557,7 +557,6 @@ thread_lock_block(struct thread *td)
{
struct mtx *lock;
- spinlock_enter();
THREAD_LOCK_ASSERT(td, MA_OWNED);
lock = td->td_lock;
td->td_lock = &blocked_lock;
@@ -572,7 +571,6 @@ thread_lock_unblock(struct thread *td, struct mtx *new)
mtx_assert(new, MA_OWNED);
MPASS(td->td_lock == &blocked_lock);
atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
- spinlock_exit();
}
void
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 56f69bd34af8..c0989062cb06 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -824,9 +824,11 @@ sched_sleep(struct thread *td)
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
+ struct mtx *tmtx;
struct td_sched *ts;
struct proc *p;
+ tmtx = NULL;
ts = td->td_sched;
p = td->td_proc;
@@ -835,17 +837,20 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
/*
* Switch to the sched lock to fix things up and pick
* a new thread.
+ * Block the td_lock in order to avoid breaking the critical path.
*/
if (td->td_lock != &sched_lock) {
mtx_lock_spin(&sched_lock);
- thread_unlock(td);
+ tmtx = thread_lock_block(td);
}
if ((p->p_flag & P_NOLOAD) == 0)
sched_load_rem();
- if (newtd)
+ if (newtd) {
+ MPASS(newtd->td_lock == &sched_lock);
newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
+ }
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
@@ -888,8 +893,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
sched_load_add();
} else {
newtd = choosethread();
+ MPASS(newtd->td_lock == &sched_lock);
}
- MPASS(newtd->td_lock == &sched_lock);
if (td != newtd) {
#ifdef HWPMC_HOOKS
@@ -907,7 +912,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
(*dtrace_vtime_switch_func)(newtd);
#endif
/* I feel sleepy */
- cpu_switch(td, newtd, td->td_lock);
+ cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
/*
* Where am I? What year is it?
* We are in the same thread that went to sleep above,
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index d1d4bbeb5130..97c7bcd52b2f 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -318,7 +318,6 @@ static void sched_balance_groups(void);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif
@@ -989,9 +988,11 @@ sched_setcpu(struct td_sched *ts, int cpu, int flags)
* The hard case, migration, we need to block the thread first to
* prevent order reversals with other cpus locks.
*/
+ spinlock_enter();
thread_lock_block(td);
TDQ_LOCK(tdq);
thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+ spinlock_exit();
return (tdq);
}
@@ -1789,23 +1790,6 @@ sched_switchin(struct tdq *tdq, struct thread *td)
}
/*
- * Block a thread for switching. Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
- struct mtx *lock;
-
- THREAD_LOCK_ASSERT(td, MA_OWNED);
- lock = td->td_lock;
- td->td_lock = &blocked_lock;
- mtx_unlock_spin(lock);
-
- return (lock);
-}
-
-/*
* Handle migration from sched_switch(). This happens only for
* cpu binding.
*/
@@ -1822,7 +1806,7 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
* not holding either run-queue lock.
*/
spinlock_enter();
- thread_block_switch(td); /* This releases the lock on tdq. */
+ thread_lock_block(td); /* This releases the lock on tdq. */
/*
* Acquire both run-queue locks before placing the thread on the new
@@ -1848,7 +1832,8 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
}
/*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
*/
static inline void
thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1907,7 +1892,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
} else {
/* This thread must be going to sleep. */
TDQ_LOCK(tdq);
- mtx = thread_block_switch(td);
+ mtx = thread_lock_block(td);
tdq_load_rem(tdq, ts);
}
/*
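
Correspondingly, on the ULE side the spin-count bump that
thread_block_switch() used to perform internally is now explicit at the call
sites, as in the migration paths above. The resulting calling convention is
roughly (taken from the sched_setcpu() hunk, comments editorial):

	spinlock_enter();
	thread_lock_block(td);			/* td_lock -> blocked_lock */
	TDQ_LOCK(tdq);				/* take the target queue lock */
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));	/* rebind td_lock */
	spinlock_exit();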