aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2020-07-23 17:26:53 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2020-07-23 17:26:53 +0000
commitc795344ff71ff37ee942ac1ace3705a58a9d9ad8 (patch)
tree9e68af145b3af63758a4d41639dc738e81da2690 /sys
parente605dcc939848312a201b4aa53bd7bb67d862b18 (diff)
downloadsrc-c795344ff71ff37ee942ac1ace3705a58a9d9ad8.tar.gz
src-c795344ff71ff37ee942ac1ace3705a58a9d9ad8.zip
locks: fix a long standing bug for primitives with kdtrace but without spinning
In such a case the second argument to lock_delay_arg_init was NULL, which was immediately causing a null pointer deref. Since the structure is only used for spin count, provide a dedicated routine initializing it. Reported by: andrew
Notes
Notes: svn path=/head/; revision=363451
Diffstat (limited to 'sys')
-rw-r--r--sys/kern/kern_mutex.c2
-rw-r--r--sys/kern/kern_rwlock.c4
-rw-r--r--sys/kern/kern_sx.c4
-rw-r--r--sys/sys/lock.h7
4 files changed, 12 insertions, 5 deletions
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 125ff9397783..13a04a5c8d12 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -538,7 +538,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#if defined(ADAPTIVE_MUTEXES)
lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
- lock_delay_arg_init(&lda, NULL);
+ lock_delay_arg_init_noadapt(&lda);
#endif
if (__predict_false(v == MTX_UNOWNED))
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 4d1f314c09d3..2489d029cbb3 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -475,7 +475,7 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
#if defined(ADAPTIVE_RWLOCKS)
lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
- lock_delay_arg_init(&lda, NULL);
+ lock_delay_arg_init_noadapt(&lda);
#endif
#ifdef HWPMC_HOOKS
@@ -951,7 +951,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
#if defined(ADAPTIVE_RWLOCKS)
lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
- lock_delay_arg_init(&lda, NULL);
+ lock_delay_arg_init_noadapt(&lda);
#endif
if (__predict_false(v == RW_UNLOCKED))
v = RW_READ_VALUE(rw);
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index a016d8c5e964..d1c6cd38886d 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -623,7 +623,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
#if defined(ADAPTIVE_SX)
lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
- lock_delay_arg_init(&lda, NULL);
+ lock_delay_arg_init_noadapt(&lda);
#endif
if (__predict_false(x == SX_LOCK_UNLOCKED))
@@ -1063,7 +1063,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
#if defined(ADAPTIVE_SX)
lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
- lock_delay_arg_init(&lda, NULL);
+ lock_delay_arg_init_noadapt(&lda);
#endif
#ifdef HWPMC_HOOKS
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 9dd4e642f320..e682f590985c 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -195,6 +195,13 @@ lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc)
la->spin_cnt = 0;
}
+static inline void
+lock_delay_arg_init_noadapt(struct lock_delay_arg *la)
+{
+ la->delay = 0;
+ la->spin_cnt = 0;
+}
+
#define lock_delay_spin(n) do { \
u_int _i; \
\