author     Mateusz Guzik <mjg@FreeBSD.org>  2017-01-18 17:55:08 +0000
committer  Mateusz Guzik <mjg@FreeBSD.org>  2017-01-18 17:55:08 +0000
commit     c5f61e6f967295c453280d0ba555c72d0149bacc (patch)
tree       fee670505646fc89c49ec37e79de091ca8639d99
parent     3f0a0612e8751cbc1f678eeb7cdd5f13e4bff69b (diff)
sx: reduce lock accesses similarly to r311172
Discussed with: jhb
Tested by:      pho (previous version)

Notes:
    svn path=/head/; revision=312390
-rw-r--r--  sys/kern/kern_sx.c  68
-rw-r--r--  sys/sys/sx.h         5
2 files changed, 49 insertions, 24 deletions
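
The change mirrors r311172 (the equivalent rework for rwlocks): the hard lock/unlock paths read the lock word once into a local variable via the new SX_READ_VALUE() macro and refresh it only after a failed cmpset, after releasing the sleep queue lock, or after waking up, instead of dereferencing sx->sx_lock at the top of every loop pass. A minimal, self-contained sketch of the pattern follows; the names (toy_lock, toy_xlock, TOY_UNLOCKED) are hypothetical and C11 atomics stand in for the kernel's atomic_cmpset_acq_ptr(), so this illustrates the idea rather than the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_UNLOCKED    ((uintptr_t)0)

struct toy_lock {
        _Atomic uintptr_t lk_word;      /* 0 == unlocked, else owner tid */
};

/*
 * Try to take the lock exclusively.  The lock word is loaded once up
 * front; after a failed compare-exchange the freshly observed value is
 * reused on the next pass instead of issuing another load.
 */
static bool
toy_xlock(struct toy_lock *lk, uintptr_t tid)
{
        uintptr_t v;

        v = atomic_load_explicit(&lk->lk_word, memory_order_relaxed);
        for (;;) {
                if (v != TOY_UNLOCKED)
                        return (false); /* held; the real code spins or sleeps */
                /*
                 * On failure compare_exchange writes the value it observed
                 * back into 'v', so the retry needs no extra read of the
                 * (possibly contended) cache line.
                 */
                if (atomic_compare_exchange_weak_explicit(&lk->lk_word, &v,
                    tid, memory_order_acquire, memory_order_relaxed))
                        return (true);
        }
}

C11's compare_exchange refreshes the caller's copy on failure; the kernel's atomic_cmpset_*() primitives only report success or failure, which is why the patch adds explicit "x = SX_READ_VALUE(sx);" re-reads after each failed cmpset and after releasing the sleep queue lock.
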
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 0036734caff1..d89df3b5bcc8 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -563,8 +563,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
lock_delay_arg_init(&lda, NULL);
#endif
+ x = SX_READ_VALUE(sx);
+
/* If we already hold an exclusive lock, then recurse. */
- if (sx_xlocked(sx)) {
+ if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
sx->lock_object.lo_name, file, line));
@@ -581,12 +583,15 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
#ifdef KDTRACE_HOOKS
all_time -= lockstat_nsecs(&sx->lock_object);
- state = sx->sx_lock;
+ state = x;
#endif
for (;;) {
- if (sx->sx_lock == SX_LOCK_UNLOCKED &&
- atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
- break;
+ if (x == SX_LOCK_UNLOCKED) {
+ if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, tid))
+ break;
+ x = SX_READ_VALUE(sx);
+ continue;
+ }
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
@@ -601,11 +606,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
* running on another CPU, spin until the owner stops
* running or the state of the lock changes.
*/
- x = sx->sx_lock;
if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
if ((x & SX_LOCK_SHARED) == 0) {
- x = SX_OWNER(x);
- owner = (struct thread *)x;
+ owner = lv_sx_owner(x);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR3(KTR_LOCK,
@@ -616,9 +619,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
"lockname:\"%s\"",
sx->lock_object.lo_name);
GIANT_SAVE();
- while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ x = SX_READ_VALUE(sx);
+ owner = lv_sx_owner(x);
+ } while (owner != NULL &&
+ TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -645,6 +651,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
}
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
+ x = SX_READ_VALUE(sx);
if (i != asx_loops)
continue;
}
@@ -652,7 +659,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
#endif
sleepq_lock(&sx->lock_object);
- x = sx->sx_lock;
+ x = SX_READ_VALUE(sx);
/*
* If the lock was released while spinning on the
@@ -701,6 +708,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
break;
}
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -712,6 +720,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
if (!atomic_cmpset_ptr(&sx->sx_lock, x,
x | SX_LOCK_EXCLUSIVE_WAITERS)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -753,6 +762,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
+ x = SX_READ_VALUE(sx);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
@@ -872,20 +882,18 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
- state = sx->sx_lock;
all_time -= lockstat_nsecs(&sx->lock_object);
#endif
+ x = SX_READ_VALUE(sx);
+#ifdef KDTRACE_HOOKS
+ state = x;
+#endif
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
*/
for (;;) {
-#ifdef KDTRACE_HOOKS
- lda.spin_cnt++;
-#endif
- x = sx->sx_lock;
-
/*
* If no other thread has an exclusive lock then try to bump up
* the count of sharers. Since we have to preserve the state
@@ -903,8 +911,13 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
(void *)(x + SX_ONE_SHARER));
break;
}
+ x = SX_READ_VALUE(sx);
continue;
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt++;
+#endif
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -918,8 +931,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* changes.
*/
if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
- x = SX_OWNER(x);
- owner = (struct thread *)x;
+ owner = lv_sx_owner(x);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR3(KTR_LOCK,
@@ -929,9 +941,11 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
sched_tdname(curthread), "spinning",
"lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
- while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ x = SX_READ_VALUE(sx);
+ owner = lv_sx_owner(x);
+ } while (owner != NULL && TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -944,7 +958,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* start the process of blocking.
*/
sleepq_lock(&sx->lock_object);
- x = sx->sx_lock;
+ x = SX_READ_VALUE(sx);
/*
* The lock could have been released while we spun.
@@ -966,6 +980,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
}
@@ -980,6 +995,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
if (!atomic_cmpset_ptr(&sx->sx_lock, x,
x | SX_LOCK_SHARED_WAITERS)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1020,6 +1036,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
+ x = SX_READ_VALUE(sx);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
@@ -1054,9 +1071,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ x = SX_READ_VALUE(sx);
for (;;) {
- x = sx->sx_lock;
-
/*
* We should never have sharers while at least one thread
* holds a shared lock.
@@ -1078,6 +1094,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
(void *)(x - SX_ONE_SHARER));
break;
}
+
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -1094,6 +1112,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
__func__, sx);
break;
}
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -1115,6 +1134,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
SX_LOCK_UNLOCKED)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
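
The adaptive-spin loops in _sx_xlock_hard() and _sx_slock_hard() change shape in the same way: the old while condition re-read SX_OWNER(sx->sx_lock) on every pass, while the new do/while reads the lock word once per lock_delay() and decodes the owner from that snapshot. A rough sketch under the same caveat, where the toy_* names are stand-ins rather than kernel interfaces:

#include <stdbool.h>
#include <stdint.h>

struct toy_thread;

/* Assumed stand-ins for lock_delay(), SX_READ_VALUE(), lv_sx_owner()
 * and TD_IS_RUNNING(); declarations only, for illustration. */
void            toy_delay(void);
uintptr_t       toy_read_lock_word(void);
struct toy_thread *toy_lv_owner(uintptr_t v);
bool            toy_thread_running(struct toy_thread *td);

/*
 * Spin while the current owner is on CPU.  The lock word is read once
 * per delay and the owner is decoded from that snapshot, rather than
 * being re-read in the loop condition.
 */
static void
toy_spin_on_owner(void)
{
        struct toy_thread *owner;
        uintptr_t v;

        do {
                toy_delay();
                v = toy_read_lock_word();
                owner = toy_lv_owner(v);
        } while (owner != NULL && toy_thread_running(owner));
}
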
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index 57a31d9aace8..e8cffaa2593d 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -88,6 +88,11 @@
#define sx_recurse lock_object.lo_data
+#define SX_READ_VALUE(sx) ((sx)->sx_lock)
+
+#define lv_sx_owner(v) \
+ ((v & SX_LOCK_SHARED) ? NULL : (struct thread *)SX_OWNER(v))
+
/*
* Function prototipes. Routines that start with an underscore are not part
* of the public interface and are wrappered with a macro.
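
The new lv_sx_owner() takes a lock value the caller has already read ("lv" for lock value) and decodes the owning thread from it, returning NULL for shared-held locks, so the spin loops never have to touch sx_lock a second time just to find out whom to spin on. A hedged equivalent with a made-up flag layout (not the real sx bit encoding):

#include <stddef.h>
#include <stdint.h>

#define TOY_SHARED      ((uintptr_t)0x01)       /* hypothetical shared bit */
#define TOY_FLAG_BITS   ((uintptr_t)0x0f)       /* hypothetical flag bits  */

struct toy_thread;

/*
 * Decode the owner from a previously read lock value without another
 * access to the lock word.  Shared-held locks have no single owner,
 * so NULL tells the caller to stop spinning.
 */
static inline struct toy_thread *
toy_lv_owner(uintptr_t v)
{
        if (v & TOY_SHARED)
                return (NULL);
        return ((struct toy_thread *)(v & ~TOY_FLAG_BITS));
}
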