about summary refs log tree commit diff
path: root/sys/sys/mutex.h
diff options
context:
space:
mode:
author: Bosko Milekic <bmilekic@FreeBSD.org> 2001-01-19 01:59:14 +0000
committer: Bosko Milekic <bmilekic@FreeBSD.org> 2001-01-19 01:59:14 +0000
commit08812b3925ae0a1a184e562499deb9a3cc59ec40 (patch)
tree5e46b9a4ade7937185420c854e4fd3b6644d343f /sys/sys/mutex.h
parent5683c3dd1f2b4d138c72d854d553404f9e5a4100 (diff)
downloadsrc-08812b3925ae0a1a184e562499deb9a3cc59ec40.tar.gz
src-08812b3925ae0a1a184e562499deb9a3cc59ec40.zip
Implement MTX_RECURSE flag for mtx_init().
All calls to mtx_init() for mutexes that recurse must now include the MTX_RECURSE bit in the flag argument variable. This change is in preparation for an upcoming (further) mutex API cleanup. The witness code will call panic() if a lock is found to recurse but the MTX_RECURSE bit was not set during the lock's initialization. The old MTX_RECURSE "state" bit (in mtx_lock) has been renamed to MTX_RECURSED, which is more appropriate given its meaning. The following locks have been made "recursive," thus far: eventhandler, Giant, callout, sched_lock, possibly some others declared in the architecture-specific code, all of the network card driver locks in pci/, as well as some other locks in dev/ stuff that I've found to be recursive. Reviewed by: jhb
Notes
Notes: svn path=/head/; revision=71228
Diffstat (limited to 'sys/sys/mutex.h')
-rw-r--r-- sys/sys/mutex.h | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index cdb9939bff2d..dff8098203e6 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -67,7 +67,8 @@
#define MTX_SPIN 0x1 /* Spin only lock */
/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
+#define MTX_RECURSE 0x2 /* Recursive lock (for mtx_init) */
+#define MTX_RLIKELY 0x4 /* Recursion likely */
#define MTX_NORECURSE 0x8 /* No recursion possible */
#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
#define MTX_NOSWITCH 0x20 /* Do not switch on release */
@@ -80,9 +81,9 @@
#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
+#define MTX_RECURSED 0x01 /* (non-spin) lock held recursively */
#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
+#define MTX_FLAGMASK ~(MTX_RECURSED | MTX_CONTESTED)
#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
#endif /* _KERNEL */
@@ -360,7 +361,7 @@ void witness_restore(struct mtx *, const char *, int);
if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
else { \
- atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE); \
+ atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \
(mp)->mtx_recurse++; \
} \
} \
@@ -408,10 +409,10 @@ void witness_restore(struct mtx *, const char *, int);
*/
#define _exitlock(mp, tid, type) do { \
if (!_release_lock(mp, tid)) { \
- if ((mp)->mtx_lock & MTX_RECURSE) { \
+ if ((mp)->mtx_lock & MTX_RECURSED) { \
if (--((mp)->mtx_recurse) == 0) \
atomic_clear_ptr(&(mp)->mtx_lock, \
- MTX_RECURSE); \
+ MTX_RECURSED); \
} else { \
mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
} \
@@ -422,7 +423,7 @@ void witness_restore(struct mtx *, const char *, int);
#ifndef _exitlock_spin
/* Release a spin lock (with possible recursion). */
#define _exitlock_spin(mp) do { \
- if ((mp)->mtx_recurse == 0) { \
+ if (!mtx_recursed((mp))) { \
int _mtx_intr = (mp)->mtx_saveintr; \
\
_release_lock_quick(mp); \