author     Kip Macy <kmacy@FreeBSD.org>    2007-02-26 08:26:44 +0000
committer  Kip Macy <kmacy@FreeBSD.org>    2007-02-26 08:26:44 +0000
commit     fe68a9163151dfb00adbf9de32b482579f46af76 (patch)
tree       329dd33524baa0ad9fe43f498498a2d120152d0d /sys/kern/subr_lock.c
parent     772ad651bf7af82b57558e5a6268e3961e9c2ad8 (diff)
general LOCK_PROFILING cleanup
- only collect timestamps when a lock is contested - this reduces the overhead
  of collecting profiles from 20x to 5x
- remove unused function from subr_lock.c
- generalize cnt_hold and cnt_lock statistics to be kept for all locks
- NOTE: rwlock profiling generates invalid statistics (and most likely always
  has) someone familiar with that should review
Notes:
    svn path=/head/; revision=167012
Diffstat (limited to 'sys/kern/subr_lock.c')
-rw-r--r--  sys/kern/subr_lock.c | 61
1 file changed, 6 insertions(+), 55 deletions(-)
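
The main win described in the commit message comes from moving the wait-time
clock reads off the uncontended fast path: the clock is read for waiting only
once a lock attempt actually blocks. Below is a minimal user-space sketch of
that pattern using a toy spinlock; toy_lock, now_ns, and the counter names are
illustrative stand-ins, not FreeBSD's lock_profile API.

/*
 * Toy sketch of contested-only timestamp collection. The wait-start/
 * wait-end clock reads are confined to the contended slow path; the
 * uncontended acquire pays for no wait-time accounting at all.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

struct toy_lock {
	atomic_flag	locked;
	uint64_t	acqtime;		/* cf. lpo_acqtime */
	uint64_t	contest_locking;	/* cf. lpo_contest_locking */
	uint64_t	wait_ns;		/* total contended wait time */
};

static void
toy_lock_acquire(struct toy_lock *lk)
{
	int contested = 0;
	uint64_t waitstart = 0;

	if (atomic_flag_test_and_set(&lk->locked)) {
		/* Contended: only now do we pay for clock reads. */
		contested = 1;
		waitstart = now_ns();
		while (atomic_flag_test_and_set(&lk->locked))
			;			/* spin */
	}
	if (lk->acqtime == 0) {			/* don't reset when recursing */
		if (contested) {
			lk->contest_locking++;
			lk->wait_ns += now_ns() - waitstart;
		}
		lk->acqtime = now_ns();
	}
}

static void
toy_lock_release(struct toy_lock *lk)
{
	lk->acqtime = 0;
	atomic_flag_clear(&lk->locked);
}

int
main(void)
{
	struct toy_lock lk = { .locked = ATOMIC_FLAG_INIT };

	toy_lock_acquire(&lk);
	toy_lock_release(&lk);
	printf("contested: %llu, waited: %llu ns\n",
	    (unsigned long long)lk.contest_locking,
	    (unsigned long long)lk.wait_ns);
	return (0);
}

Note that, as in the patch below, the acquisition timestamp used for hold-time
accounting is still taken unconditionally; only the wait-time pair of clock
reads is restricted to the contended path.
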
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index f55c85c5702f..8bd443150402 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -250,13 +250,17 @@ DB_SHOW_COMMAND(lock, db_show_lock)
 #endif
 
 #ifdef LOCK_PROFILING
-void _lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime, con\
-st char *file, int line)
+void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line)
 {
 	struct lock_profile_object *l = &lo->lo_profile_obj;
 
 	/* don't reset the timer when/if recursing */
 	if (l->lpo_acqtime == 0) {
+		lo->lo_profile_obj.lpo_contest_holding = 0;
+
+		if (contested)
+			lo->lo_profile_obj.lpo_contest_locking++;
+
 		l->lpo_filename = file;
 		l->lpo_lineno = line;
 		l->lpo_acqtime = nanoseconds();
@@ -267,59 +271,6 @@ st char *file, int line)
 	}
 }
 
-void _lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
-{
-	struct lock_profile_object *l = &lo->lo_profile_obj;
-
-	if (lock_prof_enable && waitstart) {
-		uint64_t now, waittime;
-		struct lock_prof *mpp;
-		u_int hash;
-		const char *p = l->lpo_filename;
-		int collision = 0;
-		now = nanoseconds();
-		if (now < waitstart)
-			return;
-		waittime = now - waitstart;
-		hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
-
-		mpp = &lprof_buf[hash];
-		while (mpp->name != NULL) {
-			if (mpp->line == l->lpo_lineno &&
-			    mpp->file == p &&
-			    mpp->namehash == l->lpo_namehash)
-				break;
-			/* If the lprof_hash entry is allocated to someone else, try the next one */
-			collision = 1;
-			CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)", mpp->file, mpp->line, mpp->name, mpp->namehash);
-			hash = (hash + 1) & LPROF_HASH_MASK;
-			mpp = &lprof_buf[hash];
-		}
-		if (mpp->name == NULL) {
-			int buf;
-
-			buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
-			/* Just exit if we cannot get a trace buffer */
-			if (buf >= LPROF_HASH_SIZE) {
-				++lock_prof_rejected;
-				return;
-			}
-			mpp->file = p;
-			mpp->line = l->lpo_lineno;
-			mpp->namehash = l->lpo_namehash;
-			mpp->type = l->lpo_type;
-			mpp->name = lo->lo_name;
-			if (collision)
-				++lock_prof_collisions;
-			/* We might have raced someone else but who cares, they'll try again next time */
-			++lock_prof_records;
-		}
-		LPROF_LOCK(hash);
-		mpp->cnt_wait += waittime;
-		LPROF_UNLOCK(hash);
-	}
-}
-
 void _lock_profile_release_lock(struct lock_object *lo)
 {
 	struct lock_profile_object *l = &lo->lo_profile_obj;
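
For context on the function removed above: _lock_profile_update_wait() located
its per-call-site record by open addressing, hashing (name hash, file, line)
into a fixed-size table and probing linearly past slots owned by other sites
until it found a match or a free slot. Below is a single-threaded toy
reconstruction of that lookup; the names (prof_rec, prof_lookup, TBL_SIZE) are
illustrative, and the kernel version additionally used atomic_fetchadd_int to
account allocated slots and took LPROF_LOCK around the statistics update.

#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE 4096
#define TBL_MASK (TBL_SIZE - 1)

struct prof_rec {
	const char *name;	/* NULL marks a free slot */
	const char *file;
	int line;
	uint64_t cnt_wait;	/* cf. the removed function's cnt_wait */
};

static struct prof_rec tbl[TBL_SIZE];

static struct prof_rec *
prof_lookup(const char *name, uint32_t namehash, const char *file, int line)
{
	/* Same style of hash as the removed code: mix name, file, line. */
	unsigned h = (unsigned)((namehash * 31 * 31 +
	    (uintptr_t)file * 31 + (unsigned)line) & TBL_MASK);

	for (unsigned probes = 0; probes < TBL_SIZE; probes++) {
		struct prof_rec *r = &tbl[h];

		if (r->name == NULL) {		/* free slot: claim it */
			r->name = name;
			r->file = file;
			r->line = line;
			return (r);
		}
		if (r->line == line && r->file == file && r->name == name)
			return (r);		/* this site's record */
		h = (h + 1) & TBL_MASK;		/* collision: probe onward */
	}
	return (NULL);				/* table full: drop sample */
}

int
main(void)
{
	struct prof_rec *r = prof_lookup("Giant", 0x1234, __FILE__, __LINE__);

	if (r != NULL) {
		r->cnt_wait += 500;		/* record 500 ns of waiting */
		printf("%s %s:%d waited %llu ns\n", r->name, r->file, r->line,
		    (unsigned long long)r->cnt_wait);
	}
	return (0);
}

A full table simply drops the sample (the kernel bumped lock_prof_rejected
instead), which keeps the profiling path non-blocking at the cost of losing
data once every slot is claimed.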