aboutsummaryrefslogtreecommitdiff
path: root/sys/kern/kern_rwlock.c
diff options
context:
space:
mode:
authorMateusz Guzik <mjg@FreeBSD.org>2017-10-05 19:18:02 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2017-10-05 19:18:02 +0000
commitd07e22cdd8b156267a70d2be0f2af2c5081d6dd5 (patch)
treecf91f68e2025c93b6be0272f1ce506e5d858c51a /sys/kern/kern_rwlock.c
parent6cdf28b7ad1410ac40403ba0efcbe10836b01234 (diff)
downloadsrc-d07e22cdd8b156267a70d2be0f2af2c5081d6dd5.tar.gz
src-d07e22cdd8b156267a70d2be0f2af2c5081d6dd5.zip
locks: take the number of readers into account when waiting
Previous code would always spin once before checking the lock. But a lock with e.g. 6 readers is not going to become free in the duration of one spin, even if they start draining immediately. Conservatively perform one spin for each reader. Note that the total number of allowed spins is still extremely small and is subject to change later. MFC after: 1 week
Notes
Notes: svn path=/head/; revision=324335
Diffstat (limited to 'sys/kern/kern_rwlock.c')
-rw-r--r--sys/kern/kern_rwlock.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index ca9c1613210a..4adcbc8f56cf 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -414,7 +414,7 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
int spintries = 0;
- int i;
+ int i, n;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@@ -488,8 +488,9 @@ __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
- for (i = 0; i < rowner_loops; i++) {
- cpu_spinwait();
+ for (i = 0; i < rowner_loops; i += n) {
+ n = RW_READERS(v);
+ lock_delay_spin(n);
v = RW_READ_VALUE(rw);
if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
break;
@@ -830,7 +831,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
#ifdef ADAPTIVE_RWLOCKS
volatile struct thread *owner;
int spintries = 0;
- int i;
+ int i, n;
#endif
uintptr_t x;
#ifdef LOCK_PROFILING
@@ -928,8 +929,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
- for (i = 0; i < rowner_loops; i++) {
- cpu_spinwait();
+ for (i = 0; i < rowner_loops; i += n) {
+ n = RW_READERS(v);
+ lock_delay_spin(n);
v = RW_READ_VALUE(rw);
if ((v & RW_LOCK_WRITE_SPINNER) == 0)
break;