aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKip Macy <kmacy@FreeBSD.org>2007-03-01 09:35:48 +0000
committerKip Macy <kmacy@FreeBSD.org>2007-03-01 09:35:48 +0000
commita5bceb77f204b396a2f5b2ff3bd11ed4e65ed9f8 (patch)
tree28b7efc36f36fa54c22f80ec44e51ce68aabe73c
parentb07b4f122960ce4163c65caf7e76a2178a3c83f3 (diff)
downloadsrc-a5bceb77f204b396a2f5b2ff3bd11ed4e65ed9f8.tar.gz
src-a5bceb77f204b396a2f5b2ff3bd11ed4e65ed9f8.zip
Evidently I've overestimated gcc's ability to peek inside inline functions
and optimize away unused stack values. The 48 bytes that the lock_profile_object adds to the stack evidently has a measurable performance impact on certain workloads.
Notes
Notes: svn path=/head/; revision=167136
-rw-r--r--sys/kern/kern_mutex.c12
-rw-r--r--sys/kern/kern_sx.c10
2 files changed, 16 insertions, 6 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 00d0353f1ec6..00ed475ab30e 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -157,9 +157,9 @@ _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
-
+#ifdef LOCK_PROFILING
struct lock_object lo;
-
+#endif
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
@@ -176,7 +176,9 @@ _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_flags &= ~LO_CONTESTED;
#endif
_rel_sleep_lock(m, curthread, opts, file, line);
+#ifdef LOCK_PROFILING
lock_profile_release_lock(&lo);
+#endif
}
void
@@ -200,9 +202,9 @@ _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{
-
+#ifdef LOCK_PROFILING
struct lock_object lo;
-
+#endif
MPASS(curthread != NULL);
KASSERT(m->mtx_lock != MTX_DESTROYED,
("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -218,7 +220,9 @@ _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
m->mtx_object.lo_flags &= ~LO_CONTESTED;
#endif
_rel_spin_lock(m);
+#ifdef LOCK_PROFILING
lock_profile_release_lock(&lo);
+#endif
}
/*
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 2381c06b9793..967d7a56d574 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -228,9 +228,10 @@ _sx_try_xlock(struct sx *sx, const char *file, int line)
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
+#ifdef LOCK_PROFILING
struct lock_object lo;
int count = -1;
-
+#endif
_sx_assert(sx, SX_SLOCKED, file, line);
mtx_lock(sx->sx_lock);
@@ -262,15 +263,18 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+#ifdef LOCK_PROFILING
if (count == 0)
lock_profile_release_lock(&lo);
-
+#endif
}
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
+#ifdef LOCK_PROFILING
struct lock_object lo;
+#endif
_sx_assert(sx, SX_XLOCKED, file, line);
mtx_lock(sx->sx_lock);
@@ -298,7 +302,9 @@ _sx_xunlock(struct sx *sx, const char *file, int line)
LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
mtx_unlock(sx->sx_lock);
+#ifdef LOCK_PROFILING
lock_profile_release_lock(&lo);
+#endif
}
int