author     Attilio Rao <attilio@FreeBSD.org>  2009-11-03 16:46:52 +0000
committer  Attilio Rao <attilio@FreeBSD.org>  2009-11-03 16:46:52 +0000
commit     1b9d701feea709d06a3f85e0bf2d058aa5758b52 (patch)
tree       33345f6b0c0607c9b4b3c28c1eb1f317c974a310 /sys/kern/sched_4bsd.c
parent     1c89fc757aea8b62246d11b40c144d843dfd837d (diff)
Split P_NOLOAD into a per-thread flag (TDF_NOLOAD).
This improvement aims to avoid further cache misses in scheduler-specific functions that need to keep track of average thread running time, and to avoid further locking in the places that set this flag.

Reported by:	jeff (originally), kris (currently)
Reviewed by:	jhb
Tested by:	Giuseppe Cocomazzi <sbudella at email dot it>
Notes:
    svn path=/head/; revision=198854
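The gain is easiest to see side by side: the old code had to follow td->td_proc (typically a separate cache line) and bracket the check with the global sched_lock, while the new code reads only the thread's own td_flags under the thread lock its callers already hold or take. Below is a minimal userland sketch of the two access patterns; the struct layouts, flag values, and lock stubs are illustrative stand-ins, and only the names P_NOLOAD, TDF_NOLOAD, and sched_load_rem() come from the diff.

/*
 * Illustrative sketch only -- not kernel code.  The structs, flag values,
 * and the commented-out "lock" calls are stand-ins; P_NOLOAD, TDF_NOLOAD,
 * and sched_load_rem() mirror the names used in the diff below.
 */
#include <stdio.h>

#define P_NOLOAD	0x0001	/* illustrative value: old per-process flag */
#define TDF_NOLOAD	0x0001	/* illustrative value: new per-thread flag */

struct proc   { int p_flag; };
struct thread { int td_flags; struct proc *td_proc; };

static int sched_load = 2;
static void sched_load_rem(void) { sched_load--; }

/* Old pattern: dereference td_proc (an extra cache line) under the global lock. */
static void
load_rem_old(struct thread *td)
{
	/* mtx_lock_spin(&sched_lock); */
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	/* mtx_unlock_spin(&sched_lock); */
}

/* New pattern: read only the thread's own flags; the callers in the diff
 * already hold (or take) the per-thread lock, so no global lock is needed. */
static void
load_rem_new(struct thread *td)
{
	/* thread_lock(td); ... thread_unlock(td); where not already held */
	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
}

int
main(void)
{
	struct proc p = { 0 };
	struct thread td = { 0, &p };

	load_rem_old(&td);
	load_rem_new(&td);
	printf("load after both removals: %d\n", sched_load);
	return 0;
}

Moving the flag onto the thread also matches where the scheduler already holds locks, which is what lets the first hunk in sched_exit_thread() replace the sched_lock spin mutex with thread_lock(child).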
Diffstat (limited to 'sys/kern/sched_4bsd.c')
-rw-r--r--	sys/kern/sched_4bsd.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 4fe1c1415f2c..e13cffc70d9f 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -728,10 +728,10 @@ sched_exit_thread(struct thread *td, struct thread *child)
thread_lock(td);
td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
thread_unlock(td);
- mtx_lock_spin(&sched_lock);
- if ((child->td_proc->p_flag & P_NOLOAD) == 0)
+ thread_lock(child);
+ if ((child->td_flags & TDF_NOLOAD) == 0)
sched_load_rem();
- mtx_unlock_spin(&sched_lock);
+ thread_unlock(child);
}
void
@@ -937,7 +937,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
thread_unlock(td);
}
- if ((p->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
sched_load_rem();
if (newtd)
@@ -980,7 +980,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
("trying to run inhibited thread"));
newtd->td_flags |= TDF_DIDRUN;
TD_SET_RUNNING(newtd);
- if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((newtd->td_flags & TDF_NOLOAD) == 0)
sched_load_add();
} else {
newtd = choosethread();
@@ -1289,7 +1289,7 @@ sched_add(struct thread *td, int flags)
}
}
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
sched_load_add();
runq_add(ts->ts_runq, td, flags);
if (cpu != NOCPU)
@@ -1338,7 +1338,7 @@ sched_add(struct thread *td, int flags)
if (maybe_preempt(td))
return;
}
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
sched_load_add();
runq_add(ts->ts_runq, td, flags);
maybe_resched(td);
@@ -1360,7 +1360,7 @@ sched_rem(struct thread *td)
"prio:%d", td->td_priority, KTR_ATTR_LINKED,
sched_tdname(curthread));
- if ((td->td_proc->p_flag & P_NOLOAD) == 0)
+ if ((td->td_flags & TDF_NOLOAD) == 0)
sched_load_rem();
#ifdef SMP
if (ts->ts_runq != &runq)