From b0b9dee5c9e8c74516fa4c8ca246aa89782ec4fd Mon Sep 17 00:00:00 2001
From: Attilio Rao
Date: Sat, 23 Jan 2010 15:54:21 +0000
Subject: - Fix a race in sched_switch() of sched_4bsd.

  In the case of the thread being on a sleepqueue or a turnstile, the
  sched_lock was acquired (without the aid of the td_lock interface) and
  the td_lock was dropped. This was going to break locking rules for other
  threads willing to access the thread (via the td_lock interface) and
  modify its flags (allowed as long as the container lock was different
  from the one used in sched_switch). In order to prevent this situation,
  while sched_lock is acquired there, the td_lock gets blocked. [0]

- Merge ULE's internal function thread_block_switch() into the global
  thread_lock_block() and make the former's semantics the default for
  thread_lock_block(). This means that thread_lock_block() will not
  disable interrupts when called (and consequently thread_unlock_block()
  will not re-enable them when called). This should be done manually
  when necessary. Note, however, that ULE's thread_unblock_switch() is
  not reaped because it reflects a difference in semantics in ULE (the
  td_lock may not necessarily still be blocked_lock when calling it).
  While asymmetric, it does describe a notable difference in semantics
  that is good to keep in mind.

[0] Reported by: Kohji Okuno
Tested by: Giovanni Trematerra
MFC: 2 weeks
---
 sys/kern/sched_4bsd.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'sys/kern/sched_4bsd.c')

diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 99ea7b802380..bcec5b9205dd 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -920,9 +920,11 @@ sched_sleep(struct thread *td, int pri)
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
 
+	tmtx = NULL;
 	ts = td->td_sched;
 	p = td->td_proc;
 
@@ -931,10 +933,11 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	/*
 	 * Switch to the sched lock to fix things up and pick
 	 * a new thread.
+	 * Block the td_lock in order to avoid breaking the critical path.
 	 */
 	if (td->td_lock != &sched_lock) {
 		mtx_lock_spin(&sched_lock);
-		thread_unlock(td);
+		tmtx = thread_lock_block(td);
 	}
 
 	if ((td->td_flags & TDF_NOLOAD) == 0)
@@ -1004,7 +1007,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		(*dtrace_vtime_switch_func)(newtd);
 #endif
 
-	cpu_switch(td, newtd, td->td_lock);
+	cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
 	lock_profile_obtain_lock_success(&sched_lock.lock_object, 0, 0,
 	    __FILE__, __LINE__);
 	/*
--
cgit v1.2.3
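
For readers unfamiliar with the td_lock indirection, the sketch below shows
roughly what thread_lock_block() does with the post-change semantics and why
sched_switch() must remember its return value. The function body is a
reconstruction from the semantics described in the log above, not the verbatim
kern_mutex.c source; the identifiers (td_lock, blocked_lock, mtx_unlock_spin,
THREAD_LOCK_ASSERT) are the ones the patch and log already refer to.

/*
 * Illustrative sketch (not verbatim kernel source) of the post-change
 * thread_lock_block() semantics assumed by the hunk above.
 */
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	/*
	 * Redirect td_lock to the permanently contested blocked_lock so
	 * that any other CPU calling thread_lock(td) keeps spinning until
	 * the thread is handed a real container lock again, then release
	 * the old container lock (e.g. a sleepqueue or turnstile lock).
	 * Per the log, interrupts are no longer disabled here; callers
	 * must take care of that manually when necessary.
	 */
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);
	return (lock);
}

The returned pointer is what the patched sched_switch() keeps in tmtx and
hands to cpu_switch(): at that point td->td_lock refers to blocked_lock, so
passing td->td_lock (as the old code effectively did after thread_unlock())
would no longer name the original container lock.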