about summary refs log tree commit diff
path: root/sys/sys
diff options
context:
space:
mode:
author: Mateusz Guzik <mjg@FreeBSD.org> 2017-10-20 00:30:35 +0000
committer: Mateusz Guzik <mjg@FreeBSD.org> 2017-10-20 00:30:35 +0000
commit0d74fe267b81cfd13f274a0d7f1a20ca80b464f2 (patch)
tree203e0f4a015b1e4c96705a3f9c9d98518cccf09f /sys/sys
parentda979d442a42c54d33e271367d97eb245f1e4c0b (diff)
downloadsrc-0d74fe267b81cfd13f274a0d7f1a20ca80b464f2.tar.gz
src-0d74fe267b81cfd13f274a0d7f1a20ca80b464f2.zip
mtx: clean up locking spin mutexes
1) Shorten the fast path by pushing the lockstat probe to the slow path.
2) Test for kernel panic only after it turns out we will have to spin; in particular, test only after we know we are not recursing.

MFC after: 1 week
Notes
Notes: svn path=/head/; revision=324778
Diffstat (limited to 'sys/sys')
-rw-r--r-- sys/sys/mutex.h | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 359b1199714c..c37d4ebd2aaa 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -105,8 +105,12 @@ void __mtx_unlock_sleep(volatile uintptr_t *c);
#endif
#ifdef SMP
-void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
- int opts, const char *file, int line);
+#if LOCK_DEBUG > 0
+void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+ const char *file, int line);
+#else
+void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
+#endif
#endif
void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
int line);
@@ -154,8 +158,13 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
__mtx_unlock_sleep(&(m)->mtx_lock)
#endif
#ifdef SMP
-#define _mtx_lock_spin(m, v, t, o, f, l) \
- _mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
+#if LOCK_DEBUG > 0
+#define _mtx_lock_spin(m, v, o, f, l) \
+ _mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
+#else
+#define _mtx_lock_spin(m, v, o, f, l) \
+ _mtx_lock_spin_cookie(&(m)->mtx_lock, v)
+#endif
#endif
#define _mtx_lock_flags(m, o, f, l) \
__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
@@ -219,11 +228,9 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
uintptr_t _v = MTX_UNOWNED; \
\
spinlock_enter(); \
- if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) \
- _mtx_lock_spin((mp), _v, _tid, (opts), (file), (line)); \
- else \
- LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \
- mp, 0, 0, file, line); \
+ if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \
+ !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \
+ _mtx_lock_spin((mp), _v, (opts), (file), (line)); \
} while (0)
#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
uintptr_t _tid = (uintptr_t)(tid); \