author     Attilio Rao <attilio@FreeBSD.org>    2010-01-23 15:54:21 +0000
committer  Attilio Rao <attilio@FreeBSD.org>    2010-01-23 15:54:21 +0000
commit     b0b9dee5c9e8c74516fa4c8ca246aa89782ec4fd (patch)
tree       8948b981ee7ffc54e90f7295617953bd509fcc3f /sys/kern/sched_4bsd.c
parent     ccbe0b21efc45d4785995f19c75001f4ae4a2635 (diff)
download   src-b0b9dee5c9e8c74516fa4c8ca246aa89782ec4fd.tar.gz
           src-b0b9dee5c9e8c74516fa4c8ca246aa89782ec4fd.zip
- Fix a race in sched_switch() of sched_4bsd.
  When the thread is on a sleepqueue or a turnstile, sched_lock was
  acquired directly (without the aid of the td_lock interface) and
  td_lock was then dropped. This broke the locking rules for other
  threads wanting to access the thread (via the td_lock interface) and
  modify its flags (allowed as long as the container lock differs from
  the one used in sched_switch()). In order to prevent this situation,
  td_lock is now blocked while sched_lock is held there. [0]
- Merge ULE's internal function thread_block_switch() into the global
  thread_lock_block() and make the former's semantics the default for
  thread_lock_block(). This means that thread_lock_block() no longer
  disables interrupts when called (and consequently thread_unlock_block()
  does not re-enable them); this must be done manually when necessary.
  Note, however, that ULE's thread_unblock_switch() is not removed,
  because it reflects a semantic difference specific to ULE (td_lock may
  not necessarily still be blocked_lock when it is called). While
  asymmetric, this is a notable difference in semantics that is worth
  keeping in mind.

[0] Reported by: Kohji Okuno <okuno dot kohji at jp dot panasonic dot com>
Tested by: Giovanni Trematerra <giovanni dot trematerra at gmail dot com>
MFC: 2 weeks
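For reference, the td_lock blocking pattern the first item describes works
roughly as sketched below. This is a simplified illustration reconstructed
from the commit description, not the verbatim kern_mutex.c source;
thread_lock_unblock() is included only to complete the picture of the handoff.

/*
 * Sketch of the td_lock blocking handoff (simplified; per this commit,
 * thread_lock_block() no longer disables interrupts).
 */
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	/*
	 * Repoint td_lock at the permanently unavailable blocked_lock so
	 * that concurrent thread_lock() callers spin instead of acquiring
	 * the container lock we are about to release.
	 */
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);
	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	/* Publish the real container lock; spinning waiters may proceed. */
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}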
Notes:
    svn path=/head/; revision=202889
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--  sys/kern/sched_4bsd.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 99ea7b802380..bcec5b9205dd 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -920,9 +920,11 @@ sched_sleep(struct thread *td, int pri)
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
 
+	tmtx = NULL;
 	ts = td->td_sched;
 	p = td->td_proc;
 
@@ -931,10 +933,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	/*
 	 * Switch to the sched lock to fix things up and pick
 	 * a new thread.
+	 * Block the td_lock in order to avoid breaking the critical path.
 	 */
 	if (td->td_lock != &sched_lock) {
 		mtx_lock_spin(&sched_lock);
-		thread_unlock(td);
+		tmtx = thread_lock_block(td);
 	}
 
 	if ((td->td_flags & TDF_NOLOAD) == 0)
@@ -1004,7 +1007,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 			(*dtrace_vtime_switch_func)(newtd);
 #endif
 
-		cpu_switch(td, newtd, td->td_lock);
+		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 		lock_profile_obtain_lock_success(&sched_lock.lock_object,
 		    0, 0, __FILE__, __LINE__);
 		/*
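Why passing tmtx through to cpu_switch() closes the race: cpu_switch()
stores its third argument back into the outgoing thread's td_lock only
after that thread's context is fully saved, so the original
sleepqueue/turnstile lock is re-exposed only once the switch is complete.
Meanwhile, any other CPU touching the thread goes through thread_lock(),
whose retry loop refuses to take blocked_lock. Below is a simplified
sketch of that loop; thread_lock_sketch is a hypothetical name for
illustration, and the real loop in kern_mutex.c additionally handles
WITNESS, lock profiling and contested-spin details.

/* Hypothetical, simplified stand-in for the thread_lock() retry loop. */
void
thread_lock_sketch(struct thread *td)
{
	struct mtx *m;

	for (;;) {
		m = td->td_lock;
		/* blocked_lock is never released; spin until repointed. */
		if (m == &blocked_lock) {
			cpu_spinwait();
			continue;
		}
		mtx_lock_spin(m);
		if (m == td->td_lock)
			return;		/* Still the container lock: thread is ours. */
		/* td_lock moved underneath us (e.g. during a switch); retry. */
		mtx_unlock_spin(m);
	}
}

Because sched_switch() now blocks td_lock before releasing the
sleepqueue/turnstile lock, this loop can no longer acquire the stale
container lock and mutate td_flags in the middle of a switch.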