diff options
author | Mateusz Guzik <mjg@FreeBSD.org> | 2020-11-04 21:18:08 +0000 |
---|---|---|
committer | Mateusz Guzik <mjg@FreeBSD.org> | 2020-11-04 21:18:08 +0000 |
commit | 6fc2b069ca0a5604a06dcf189fd74fc0c323f707 (patch) | |
tree | b2330b2abb2dd2a071b3d7bca80c150d31ab29f5 | |
parent | 4e306624d1e47f7ace600fea24486b378d07aaf5 (diff) | |
download | src-6fc2b069ca0a5604a06dcf189fd74fc0c323f707.tar.gz src-6fc2b069ca0a5604a06dcf189fd74fc0c323f707.zip |
rms: fixup concurrent writer handling and add more features
Previously the code had one wait channel for all pending writers.
This could result in a buggy scenario where, after a writer that switches
the lock mode from readers to writers goes off CPU, another writer
queues itself and then the last reader wakes up the latter instead
of the former.
Use a separate channel.
While here add features to reliably detect whether curthread has
the lock write-owned. This will be used by ZFS.
Notes:
svn path=/head/; revision=367341
-rw-r--r-- | sys/kern/kern_rmlock.c | 51 | ||||
-rw-r--r-- | sys/sys/_rmlock.h | 1 | ||||
-rw-r--r-- | sys/sys/rmlock.h | 25 |
3 files changed, 64 insertions, 13 deletions
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c index 28927f6f84f8..74227f6a46ec 100644 --- a/sys/kern/kern_rmlock.c +++ b/sys/kern/kern_rmlock.c @@ -878,10 +878,15 @@ db_show_rm(const struct lock_object *lock) * problem at some point. The easiest way to lessen it is to provide a bitmap. */ +#define RMS_NOOWNER ((void *)0x1) +#define RMS_TRANSIENT ((void *)0x2) +#define RMS_FLAGMASK 0xf + void rms_init(struct rmslock *rms, const char *name) { + rms->owner = RMS_NOOWNER; rms->writers = 0; rms->readers = 0; mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW); @@ -922,6 +927,7 @@ rms_rlock(struct rmslock *rms) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); + MPASS(atomic_load_ptr(&rms->owner) != curthread); critical_enter(); zpcpu_set_protected(rms->readers_influx, 1); @@ -941,6 +947,8 @@ int rms_try_rlock(struct rmslock *rms) { + MPASS(atomic_load_ptr(&rms->owner) != curthread); + critical_enter(); zpcpu_set_protected(rms->readers_influx, 1); __compiler_membar(); @@ -1054,23 +1062,33 @@ rms_wlock(struct rmslock *rms) { WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__); + MPASS(atomic_load_ptr(&rms->owner) != curthread); mtx_lock(&rms->mtx); rms->writers++; if (rms->writers > 1) { - msleep(&rms->writers, &rms->mtx, (PUSER - 1) | PDROP, + msleep(&rms->owner, &rms->mtx, (PUSER - 1), mtx_name(&rms->mtx), 0); MPASS(rms->readers == 0); - return; + KASSERT(rms->owner == RMS_TRANSIENT, + ("%s: unexpected owner value %p\n", __func__, + rms->owner)); + goto out_grab; } + KASSERT(rms->owner == RMS_NOOWNER, + ("%s: unexpected owner value %p\n", __func__, rms->owner)); + rms_wlock_switch(rms); - if (rms->readers > 0) - msleep(&rms->writers, &rms->mtx, (PUSER - 1) | PDROP, + if (rms->readers > 0) { + msleep(&rms->writers, &rms->mtx, (PUSER - 1), mtx_name(&rms->mtx), 0); - else - mtx_unlock(&rms->mtx); + } + +out_grab: + rms->owner = curthread; + mtx_unlock(&rms->mtx); MPASS(rms->readers == 0); } @@ -1079,12 +1097,27 @@ rms_wunlock(struct rmslock 
*rms) { mtx_lock(&rms->mtx); + KASSERT(rms->owner == curthread, + ("%s: unexpected owner value %p\n", __func__, rms->owner)); MPASS(rms->writers >= 1); MPASS(rms->readers == 0); rms->writers--; - if (rms->writers > 0) - wakeup_one(&rms->writers); - else + if (rms->writers > 0) { + wakeup_one(&rms->owner); + rms->owner = RMS_TRANSIENT; + } else { wakeup(&rms->readers); + rms->owner = RMS_NOOWNER; + } mtx_unlock(&rms->mtx); } + +void +rms_unlock(struct rmslock *rms) +{ + + if (rms_wowned(rms)) + rms_wunlock(rms); + else + rms_runlock(rms); +} diff --git a/sys/sys/_rmlock.h b/sys/sys/_rmlock.h index 68ddc3b3fb03..e153130c4e75 100644 --- a/sys/sys/_rmlock.h +++ b/sys/sys/_rmlock.h @@ -72,6 +72,7 @@ struct rm_priotracker { struct rmslock { struct mtx mtx; + struct thread *owner; int writers; int readers; int *readers_pcpu; diff --git a/sys/sys/rmlock.h b/sys/sys/rmlock.h index ba2b9bbb732d..9476d4a9d9f9 100644 --- a/sys/sys/rmlock.h +++ b/sys/sys/rmlock.h @@ -140,16 +140,33 @@ int rms_try_rlock(struct rmslock *rms); void rms_runlock(struct rmslock *rms); void rms_wlock(struct rmslock *rms); void rms_wunlock(struct rmslock *rms); +void rms_unlock(struct rmslock *rms); + +static inline int +rms_wowned(struct rmslock *rms) +{ + + return (rms->owner == curthread); +} /* - * Writers are not explicitly tracked, thus until that changes the best we can - * do is indicate the lock is taken for writing by *someone*. + * Only valid to call if you hold the lock in some manner. */ static inline int -rms_wowned(struct rmslock *rms) +rms_rowned(struct rmslock *rms) { - return (rms->writers > 0); + return (rms->readers > 0); +} + +static inline int +rms_owned_any(struct rmslock *rms) +{ + + if (rms_wowned(rms)) + return (1); + + return (rms_rowned(rms)); } #endif /* _KERNEL */ |