aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexander Motin <mav@FreeBSD.org>2021-09-03 01:16:46 +0000
committerAlexander Motin <mav@FreeBSD.org>2021-09-17 01:40:15 +0000
commitb4194a432b599c9e96ce8cd7418e15447d4a4cdb (patch)
tree464424355baf6739a989cc6adb85a40daf6d406b
parentc8f2af5f5d1fff0212c18d876acc79ffc17178c8 (diff)
downloadsrc-b4194a432b599c9e96ce8cd7418e15447d4a4cdb.tar.gz
src-b4194a432b599c9e96ce8cd7418e15447d4a4cdb.zip
callout(9): Allow spin locks use with callout_init_mtx().
Implement lock_spin()/unlock_spin() lock class methods, moving the
assertion to _sleep() instead.

Change assertions in callout(9) to allow spin locks for both regular
and C_DIRECT_EXEC cases.  In case of C_DIRECT_EXEC callouts spin locks
are the only locks allowed actually.

As the first use case allow taskqueue_enqueue_timeout() use on fast
task queues.  It actually becomes more efficient due to avoided extra
context switches in callout(9) thanks to C_DIRECT_EXEC.

MFC after:	2 weeks
Reviewed by:	hselasky
Differential Revision:	https://reviews.freebsd.org/D31778

(cherry picked from commit 4730a8972b1f4b67bf9ffde8e63ca906ef4c9563)
-rw-r--r--	share/man/man9/callout.9	5
-rw-r--r--	share/man/man9/taskqueue.9	5
-rw-r--r--	sys/kern/kern_mutex.c	8
-rw-r--r--	sys/kern/kern_synch.c	2
-rw-r--r--	sys/kern/kern_timeout.c	10
-rw-r--r--	sys/kern/subr_taskqueue.c	3
6 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/share/man/man9/callout.9 b/share/man/man9/callout.9
index 9e0cf5e6dc5e..5c011a20d338 100644
--- a/share/man/man9/callout.9
+++ b/share/man/man9/callout.9
@@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd December 13, 2019
+.Dd September 1, 2021
.Dt CALLOUT 9
.Os
.Sh NAME
@@ -241,9 +241,6 @@ and the associated lock is released.
This ensures that stopping or rescheduling the callout will abort any
previously scheduled invocation.
.Pp
-Only regular mutexes may be used with
-.Fn callout_init_mtx ;
-spin mutexes are not supported.
A sleepable read-mostly lock
.Po
one initialized with the
diff --git a/share/man/man9/taskqueue.9 b/share/man/man9/taskqueue.9
index 2ede90458629..58eb6b7c5571 100644
--- a/share/man/man9/taskqueue.9
+++ b/share/man/man9/taskqueue.9
@@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd June 6, 2020
+.Dd September 1, 2021
.Dt TASKQUEUE 9
.Os
.Sh NAME
@@ -237,9 +237,6 @@ and
.Va flags ,
as detailed in
.Xr callout 9 .
-Only non-fast task queues can be used for
-.Va timeout_task
-scheduling.
If the
.Va ticks
argument is negative, the already scheduled enqueueing is not re-scheduled.
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index d9db69e2ac09..54a96603ece3 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -215,7 +215,7 @@ void
lock_spin(struct lock_object *lock, uintptr_t how)
{
- panic("spin locks can only use msleep_spin");
+ mtx_lock_spin((struct mtx *)lock);
}
uintptr_t
@@ -232,8 +232,12 @@ unlock_mtx(struct lock_object *lock)
uintptr_t
unlock_spin(struct lock_object *lock)
{
+ struct mtx *m;
- panic("spin locks can only use msleep_spin");
+ m = (struct mtx *)lock;
+ mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
+ mtx_unlock_spin(m);
+ return (0);
}
#ifdef KDTRACE_HOOKS
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index b63877e26b68..3d51af2740fc 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -187,6 +187,8 @@ _sleep(const void *ident, struct lock_object *lock, int priority,
DROP_GIANT();
if (lock != NULL && lock != &Giant.lock_object &&
!(class->lc_flags & LC_SLEEPABLE)) {
+ KASSERT(!(class->lc_flags & LC_SPINLOCK),
+ ("spin locks can only use msleep_spin"));
WITNESS_SAVE(lock, lock_witness);
lock_state = class->lc_unlock(lock);
} else
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index d9cf2784f642..992a093d30ab 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -919,8 +919,9 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
} else {
direct = 0;
}
- KASSERT(!direct || c->c_lock == NULL,
- ("%s: direct callout %p has lock", __func__, c));
+ KASSERT(!direct || c->c_lock == NULL ||
+ (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
+ ("%s: direct callout %p has non-spin lock", __func__, c));
cc = callout_lock(c);
/*
* Don't allow migration if the user does not care.
@@ -1332,9 +1333,8 @@ _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
("callout_init_lock: bad flags %d", flags));
KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
- KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
- (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
- __func__));
+ KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
+ ("%s: callout %p has sleepable lock", __func__, c));
c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
c->c_cpu = cc_default_cpu;
}
diff --git a/sys/kern/subr_taskqueue.c b/sys/kern/subr_taskqueue.c
index 061361cc06d7..e43b09010761 100644
--- a/sys/kern/subr_taskqueue.c
+++ b/sys/kern/subr_taskqueue.c
@@ -309,7 +309,6 @@ taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
TQ_LOCK(queue);
KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
("Migrated queue"));
- KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
timeout_task->q = queue;
res = timeout_task->t.ta_pending;
if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
@@ -329,6 +328,8 @@ taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
sbt = -sbt; /* Ignore overflow. */
}
if (sbt > 0) {
+ if (queue->tq_spin)
+ flags |= C_DIRECT_EXEC;
callout_reset_sbt(&timeout_task->c, sbt, pr,
taskqueue_timeout_func, timeout_task, flags);
}