aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/subr_prof.c
diff options
context:
space:
mode:
authorJeff Roberson <jeff@FreeBSD.org>2007-06-05 00:00:57 +0000
committerJeff Roberson <jeff@FreeBSD.org>2007-06-05 00:00:57 +0000
commit982d11f836278f1e95ae1ae398aa4d1d07a19006 (patch)
tree6727b982fa0d93b8aafab313bdc797aee9e314d2 /sys/kern/subr_prof.c
parenta8cdbf449a61645a36419c3ae4f1134fc39d8f8e (diff)
downloadsrc-982d11f836278f1e95ae1ae398aa4d1d07a19006.tar.gz
src-982d11f836278f1e95ae1ae398aa4d1d07a19006.zip
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling synchronization.
- Use the per-process spinlock rather than the sched_lock for per-process scheduling synchronization.

Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
Notes
Notes: svn path=/head/; revision=170307
Diffstat (limited to 'sys/kern/subr_prof.c')
-rw-r--r--sys/kern/subr_prof.c17
1 file changed, 10 insertions, 7 deletions
diff --git a/sys/kern/subr_prof.c b/sys/kern/subr_prof.c
index 498d9d2fc958..8da12ac9e559 100644
--- a/sys/kern/subr_prof.c
+++ b/sys/kern/subr_prof.c
@@ -423,12 +423,12 @@ profil(td, uap)
}
PROC_LOCK(p);
upp = &td->td_proc->p_stats->p_prof;
- mtx_lock_spin(&time_lock);
+ PROC_SLOCK(p);
upp->pr_off = uap->offset;
upp->pr_scale = uap->scale;
upp->pr_base = uap->samples;
upp->pr_size = uap->size;
- mtx_unlock_spin(&time_lock);
+ PROC_SUNLOCK(p);
startprofclock(p);
PROC_UNLOCK(p);
@@ -468,22 +468,22 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
if (ticks == 0)
return;
prof = &td->td_proc->p_stats->p_prof;
- mtx_lock_spin(&time_lock);
+ PROC_SLOCK(td->td_proc);
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
- mtx_unlock_spin(&time_lock);
+ PROC_SUNLOCK(td->td_proc);
return; /* out of range; ignore */
}
addr = prof->pr_base + i;
- mtx_unlock_spin(&time_lock);
+ PROC_SUNLOCK(td->td_proc);
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
td->td_profil_addr = pc;
td->td_profil_ticks = ticks;
td->td_pflags |= TDP_OWEUPC;
- mtx_lock_spin(&sched_lock);
+ thread_lock(td);
td->td_flags |= TDF_ASTPENDING;
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(td);
}
}
@@ -511,12 +511,15 @@ addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
}
p->p_profthreads++;
prof = &p->p_stats->p_prof;
+ PROC_SLOCK(p);
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
+ PROC_SUNLOCK(p);
goto out;
}
addr = prof->pr_base + i;
+ PROC_SUNLOCK(p);
PROC_UNLOCK(p);
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;