diff options
author | Jeff Roberson <jeff@FreeBSD.org> | 2008-04-04 10:00:46 +0000 |
---|---|---|
committer | Jeff Roberson <jeff@FreeBSD.org> | 2008-04-04 10:00:46 +0000 |
commit | 00ca09449df8e90be2331c3c5541589513656e44 (patch) | |
tree | fd5332828316b33e969f08af5795805892e46c05 /sys/kern/kern_rwlock.c | |
parent | aea15cbc62561caeedb9bdf69066c497861ce077 (diff) | |
download | src-00ca09449df8e90be2331c3c5541589513656e44.tar.gz src-00ca09449df8e90be2331c3c5541589513656e44.zip |
- Add sysctls at debug.rwlock to control the behavior of the speculative
spinning when readers hold a lock. This spinning is speculative because,
unlike the write case, we can not test whether the owners are running.
- Add speculative read spinning for readers who are blocked by pending
writers while a read lock is still held. This allows the thread to
spin until the write lock succeeds after which it may spin until the
writer has released the lock. This prevents excessive context switches
when readers and writers both hold the lock for brief periods.
Sponsored by: Nokia
Notes:
svn path=/head/; revision=177912
Diffstat (limited to 'sys/kern/kern_rwlock.c')
-rw-r--r-- | sys/kern/kern_rwlock.c | 29 |
1 file changed, 26 insertions, 3 deletions
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index b2469edde1ea..6a0aab924cae 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -39,10 +39,12 @@ __FBSDID("$FreeBSD$");

 #include <sys/param.h>
 #include <sys/ktr.h>
+#include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>

@@ -54,6 +56,14 @@ CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
 #define ADAPTIVE_RWLOCKS
 #endif

+#ifdef ADAPTIVE_RWLOCKS
+static int rowner_retries = 10;
+static int rowner_loops = 10000;
+SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
+SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
+#endif
+
 #ifdef DDB
 #include <ddb/ddb.h>

@@ -261,6 +271,8 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 	struct turnstile *ts;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
+	int spintries = 0;
+	int i;
 #endif
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -324,6 +336,16 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 				cpu_spinwait();
 			continue;
 		}
+		} else if (spintries < rowner_retries) {
+			spintries++;
+			for (i = 0; i < rowner_loops; i++) {
+				v = rw->rw_lock;
+				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
+					break;
+				cpu_spinwait();
+			}
+			if (i != rowner_loops)
+				continue;
 		}
 #endif
@@ -592,7 +614,8 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 				cpu_spinwait();
 			continue;
 		}
-		if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
+		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
+		    spintries < rowner_retries) {
 			if (!(v & RW_LOCK_WRITE_SPINNER)) {
 				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
 				    v | RW_LOCK_WRITE_SPINNER)) {
@@ -601,12 +624,12 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 					cpu_spinwait();
 					continue;
 				}
 			}
 			spintries++;
-			for (i = 100000; i > 0; i--) {
+			for (i = 0; i < rowner_loops; i++) {
 				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
 					break;
 				cpu_spinwait();
 			}
-			if (i)
+			if (i != rowner_loops)
 				continue;
 		}
 #endif