author      Mateusz Guzik <mjg@FreeBSD.org>    2017-12-31 00:37:50 +0000
committer   Mateusz Guzik <mjg@FreeBSD.org>    2017-12-31 00:37:50 +0000
commit      fb10612355224999d7716b52197082eb4d46e91c (patch)
tree        17d988b0d4dcb25477c8887fd0db22e620d66aaa /sys/kern
parent      03cd714fc58e5471ad334f04dd34ae2a3326ffc2 (diff)
sx: read the SX_NOADAPTIVE flag and Giant ownership only once
These used to be read multiple times while waiting for the lock to become free, which had the potential to issue completely avoidable traffic.
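
To illustrate the idea outside the kernel, here is a minimal, self-contained sketch in C (all names -- fake_lock, FAKE_NOSPIN, fake_try, fake_lock_slow -- are hypothetical stand-ins, not FreeBSD APIs) of hoisting an immutable flag read out of a contended acquire loop, so each retry no longer touches the shared lock structure just to re-learn something that cannot change:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define FAKE_NOSPIN     0x01            /* stand-in for SX_NOADAPTIVE */

    struct fake_lock {
            atomic_uint     locked;         /* 0 == free, 1 == held */
            unsigned        flags;          /* fixed at initialization time */
    };

    static bool
    fake_try(struct fake_lock *lk)
    {
            unsigned expected = 0;

            return (atomic_compare_exchange_strong(&lk->locked, &expected, 1));
    }

    static void
    fake_lock_slow(struct fake_lock *lk)
    {
            /*
             * Read the flag word once, before the retry loop.  Re-testing
             * lk->flags on every iteration re-fetches shared state for no
             * benefit, since the flag never changes after initialization.
             */
            bool spin_allowed = (lk->flags & FAKE_NOSPIN) == 0;

            for (;;) {
                    if (fake_try(lk))
                            return;
                    if (spin_allowed)
                            continue;       /* busy-wait and retry */
                    sched_yield();          /* stand-in for sleeping on a queue */
            }
    }

    int
    main(void)
    {
            struct fake_lock lk = { .locked = 0, .flags = 0 };

            fake_lock_slow(&lk);            /* uncontended: acquires immediately */
            return (0);
    }

The hunks below apply the same hoisting to the real code: the result of the SX_NOADAPTIVE test is cached in a new adaptive local, and GIANT_SAVE() is issued once per slow-path entry rather than at every spin and sleep site.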
Notes:
    svn path=/head/; revision=327397
Diffstat (limited to 'sys/kern')
-rw-r--r--    sys/kern/kern_sx.c    158
1 file changed, 87 insertions(+), 71 deletions(-)
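
The first hunk below also wraps the Giant-ownership test in GIANT_SAVE() with __predict_false(), marking the Giant-held case as the unlikely one so the compiler can lay out the common path without a taken branch. In FreeBSD these hints are thin wrappers around the compiler's __builtin_expect(); a stand-alone sketch of the idiom follows, where my_predict_true/my_predict_false are hypothetical stand-ins for the kernel macros:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's __predict_true/__predict_false. */
    #define my_predict_true(exp)    __builtin_expect((exp) != 0, 1)
    #define my_predict_false(exp)   __builtin_expect((exp) != 0, 0)

    static int
    slow_path_work(void)
    {
            return (printf("rare case\n"));
    }

    int
    main(void)
    {
            int giant_owned = 0;    /* usually false on this path */

            if (my_predict_false(giant_owned))
                    slow_path_work();
            return (0);
    }

The hint only influences code layout and static branch prediction; it does not change behavior, which is why it is safe to apply to paths like GIANT_SAVE() that are known to be cold.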
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 84ffb6090536..65ab0e6c6d80 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -91,7 +91,7 @@ PMC_SOFT_DECLARE( , , lock, failed);
WITNESS_SAVE_DECL(Giant) \
#define GIANT_SAVE(work) do { \
- if (mtx_owned(&Giant)) { \
+ if (__predict_false(mtx_owned(&Giant))) { \
work++; \
WITNESS_SAVE(&Giant.lock_object, Giant); \
while (mtx_owned(&Giant)) { \
@@ -533,6 +533,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
u_int i, n, spintries = 0;
+ bool adaptive;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@@ -581,6 +582,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
+#ifdef ADAPTIVE_SX
+ adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
+#endif
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -597,6 +602,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
state = x;
}
#endif
+#ifndef INVARIANTS
+ GIANT_SAVE(extra_work);
+#endif
for (;;) {
if (x == SX_LOCK_UNLOCKED) {
@@ -604,67 +612,68 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
break;
continue;
}
+#ifdef INVARIANTS
+ GIANT_SAVE(extra_work);
+#endif
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
+ if (__predict_false(!adaptive))
+ goto sleepq;
/*
* If the lock is write locked and the owner is
* running on another CPU, spin until the owner stops
* running or the state of the lock changes.
*/
- if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
- if ((x & SX_LOCK_SHARED) == 0) {
- owner = lv_sx_owner(x);
- if (TD_IS_RUNNING(owner)) {
- if (LOCK_LOG_TEST(&sx->lock_object, 0))
- CTR3(KTR_LOCK,
- "%s: spinning on %p held by %p",
- __func__, sx, owner);
- KTR_STATE1(KTR_SCHED, "thread",
- sched_tdname(curthread), "spinning",
- "lockname:\"%s\"",
- sx->lock_object.lo_name);
- GIANT_SAVE(extra_work);
- do {
- lock_delay(&lda);
- x = SX_READ_VALUE(sx);
- owner = lv_sx_owner(x);
- } while (owner != NULL &&
- TD_IS_RUNNING(owner));
- KTR_STATE0(KTR_SCHED, "thread",
- sched_tdname(curthread), "running");
- continue;
- }
- } else if (SX_SHARERS(x) && spintries < asx_retries) {
+ if ((x & SX_LOCK_SHARED) == 0) {
+ owner = lv_sx_owner(x);
+ if (TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&sx->lock_object, 0))
+ CTR3(KTR_LOCK,
+ "%s: spinning on %p held by %p",
+ __func__, sx, owner);
KTR_STATE1(KTR_SCHED, "thread",
sched_tdname(curthread), "spinning",
- "lockname:\"%s\"", sx->lock_object.lo_name);
- GIANT_SAVE(extra_work);
- spintries++;
- for (i = 0; i < asx_loops; i += n) {
- if (LOCK_LOG_TEST(&sx->lock_object, 0))
- CTR4(KTR_LOCK,
- "%s: shared spinning on %p with %u and %u",
- __func__, sx, spintries, i);
- n = SX_SHARERS(x);
- lock_delay_spin(n);
+ "lockname:\"%s\"",
+ sx->lock_object.lo_name);
+ do {
+ lock_delay(&lda);
x = SX_READ_VALUE(sx);
- if ((x & SX_LOCK_SHARED) == 0 ||
- SX_SHARERS(x) == 0)
- break;
- }
-#ifdef KDTRACE_HOOKS
- lda.spin_cnt += i;
-#endif
+ owner = lv_sx_owner(x);
+ } while (owner != NULL &&
+ TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
- if (i != asx_loops)
- continue;
+ continue;
+ }
+ } else if (SX_SHARERS(x) && spintries < asx_retries) {
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
+ spintries++;
+ for (i = 0; i < asx_loops; i += n) {
+ if (LOCK_LOG_TEST(&sx->lock_object, 0))
+ CTR4(KTR_LOCK,
+ "%s: shared spinning on %p with %u and %u",
+ __func__, sx, spintries, i);
+ n = SX_SHARERS(x);
+ lock_delay_spin(n);
+ x = SX_READ_VALUE(sx);
+ if ((x & SX_LOCK_SHARED) == 0 ||
+ SX_SHARERS(x) == 0)
+ break;
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt += i;
+#endif
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
+ if (i != asx_loops)
+ continue;
}
#endif
-
+sleepq:
sleepq_lock(&sx->lock_object);
x = SX_READ_VALUE(sx);
retry_sleepq:
@@ -686,8 +695,7 @@ retry_sleepq:
* chain lock. If so, drop the sleep queue lock and try
* again.
*/
- if (!(x & SX_LOCK_SHARED) &&
- (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+ if (!(x & SX_LOCK_SHARED) && adaptive) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
@@ -742,7 +750,6 @@ retry_sleepq:
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
- GIANT_SAVE(extra_work);
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
@@ -892,6 +899,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
GIANT_DECLARE;
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
+ bool adaptive;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@@ -920,6 +928,10 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
lock_delay_arg_init(&lda, NULL);
#endif
+#ifdef ADAPTIVE_SX
+ adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
+#endif
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -936,6 +948,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
state = x;
}
#endif
+#ifndef INVARIANTS
+ GIANT_SAVE(extra_work);
+#endif
/*
* As with rwlocks, we don't make any attempt to try to block
@@ -944,39 +959,42 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
for (;;) {
if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
break;
+#ifdef INVARIANTS
+ GIANT_SAVE(extra_work);
+#endif
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
+ if (__predict_false(!adaptive))
+ goto sleepq;
/*
* If the owner is running on another CPU, spin until
* the owner stops running or the state of the lock
* changes.
*/
- if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
- owner = lv_sx_owner(x);
- if (TD_IS_RUNNING(owner)) {
- if (LOCK_LOG_TEST(&sx->lock_object, 0))
- CTR3(KTR_LOCK,
- "%s: spinning on %p held by %p",
- __func__, sx, owner);
- KTR_STATE1(KTR_SCHED, "thread",
- sched_tdname(curthread), "spinning",
- "lockname:\"%s\"", sx->lock_object.lo_name);
- GIANT_SAVE(extra_work);
- do {
- lock_delay(&lda);
- x = SX_READ_VALUE(sx);
- owner = lv_sx_owner(x);
- } while (owner != NULL && TD_IS_RUNNING(owner));
- KTR_STATE0(KTR_SCHED, "thread",
- sched_tdname(curthread), "running");
- continue;
- }
+ owner = lv_sx_owner(x);
+ if (TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&sx->lock_object, 0))
+ CTR3(KTR_LOCK,
+ "%s: spinning on %p held by %p",
+ __func__, sx, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname(curthread), "spinning",
+ "lockname:\"%s\"", sx->lock_object.lo_name);
+ do {
+ lock_delay(&lda);
+ x = SX_READ_VALUE(sx);
+ owner = lv_sx_owner(x);
+ } while (owner != NULL && TD_IS_RUNNING(owner));
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname(curthread), "running");
+ continue;
}
#endif
+sleepq:
/*
* Some other thread already has an exclusive lock, so
* start the process of blocking.
@@ -999,8 +1017,7 @@ retry_sleepq:
* the owner stops running or the state of the lock
* changes.
*/
- if (!(x & SX_LOCK_SHARED) &&
- (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
+ if (!(x & SX_LOCK_SHARED) && adaptive) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
@@ -1035,7 +1052,6 @@ retry_sleepq:
#ifdef KDTRACE_HOOKS
sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
- GIANT_SAVE(extra_work);
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
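
One detail worth being careful about when caching a negative-sense flag such as SX_NOADAPTIVE is the polarity of the cached local: adaptive must end up true exactly when the flag is clear, because the spinning code now runs only when adaptive is set and the !adaptive case jumps straight to the sleep queue. A tiny self-check of that invariant, written against a stand-in flag value rather than the kernel headers:

    #include <assert.h>
    #include <stdbool.h>

    #define STANDIN_NOADAPTIVE      0x01    /* stand-in for SX_NOADAPTIVE */

    int
    main(void)
    {
            bool adaptive;

            /* Flag clear: adaptive spinning is allowed. */
            adaptive = (0 & STANDIN_NOADAPTIVE) == 0;
            assert(adaptive);

            /* Flag set: spinning is disabled, go straight to the sleep queue. */
            adaptive = (STANDIN_NOADAPTIVE & STANDIN_NOADAPTIVE) == 0;
            assert(!adaptive);

            return (0);
    }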