author	Mark Johnston <markj@FreeBSD.org>	2017-06-09 19:41:12 +0000
committer	Mark Johnston <markj@FreeBSD.org>	2017-06-09 19:41:12 +0000
commit	465659643b5539bd609c00c41a611e3095a31cf9 (patch)
tree	9cd334dd9f8300fd8828924f53868e9fef7d9579	/sys/compat/linuxkpi/common/include/linux/wait.h
parent	1bece9d5626f41adc05632184576f2b56cd148c0 (diff)
Augment wait queue support in the LinuxKPI.
In particular:

- Don't evaluate event conditions with a sleepqueue lock held, since such
  code may attempt to acquire arbitrary locks.
- Fix the return value for wait_event_interruptible() in the case that the
  wait is interrupted by a signal.
- Implement wait_on_bit_timeout() and wait_on_atomic_t().
- Implement some functions used to test for pending signals.
- Implement a number of wait_event_*() variants and unify the existing
  implementations.
- Unify the mechanism used by wait_event_*() and schedule() to put the
  calling thread to sleep.

This is required to support updated DRM drivers.

Thanks to hselasky for finding and fixing a number of bugs in the original
revision.

Reviewed by:	hselasky
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D10986
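For context, a minimal consumer-side sketch of the corrected
wait_event_interruptible() semantics described above. It is illustrative only
and not part of this commit: "struct drv_softc" and its "wq" and "ready"
members are hypothetical stand-ins for a driver's own state. (The pre-change
macro negated the error and so returned a positive ERESTARTSYS on a signal;
after this change the wait yields -ERESTARTSYS, matching Linux semantics.)

	#include <linux/wait.h>

	struct drv_softc {		/* hypothetical example state */
		wait_queue_head_t wq;
		int ready;
	};

	/*
	 * Hypothetical consumer of wait_event_interruptible().  A wait
	 * interrupted by a signal now yields -ERESTARTSYS (a negative
	 * errno); any other outcome means the condition became true.
	 */
	static int
	drv_wait_ready(struct drv_softc *sc)
	{
		int error;

		error = wait_event_interruptible(sc->wq, sc->ready != 0);
		if (error == -ERESTARTSYS)
			return (error);	/* interrupted by a signal */
		return (0);		/* condition became true */
	}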
Notes:
    svn path=/head/; revision=319757
Diffstat (limited to 'sys/compat/linuxkpi/common/include/linux/wait.h')
-rw-r--r--	sys/compat/linuxkpi/common/include/linux/wait.h	332
1 file changed, 208 insertions(+), 124 deletions(-)
diff --git a/sys/compat/linuxkpi/common/include/linux/wait.h b/sys/compat/linuxkpi/common/include/linux/wait.h
index 14da6d264cec..c04a79092c6a 100644
--- a/sys/compat/linuxkpi/common/include/linux/wait.h
+++ b/sys/compat/linuxkpi/common/include/linux/wait.h
@@ -3,6 +3,7 @@
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -28,160 +29,243 @@
*
* $FreeBSD$
*/
-#ifndef _LINUX_WAIT_H_
+
+#ifndef _LINUX_WAIT_H_
#define _LINUX_WAIT_H_
#include <linux/compiler.h>
#include <linux/list.h>
-#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+#include <asm/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/sleepqueue.h>
-#include <sys/kernel.h>
-#include <sys/proc.h>
-typedef struct {
-} wait_queue_t;
+#define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)
-typedef struct {
- unsigned int wchan;
-} wait_queue_head_t;
+#define might_sleep() \
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")
-#define init_waitqueue_head(x) \
- do { } while (0)
+struct wait_queue;
+struct wait_queue_head;
-static inline void
-__wake_up(wait_queue_head_t *q, int all)
-{
- int wakeup_swapper;
- void *c;
-
- c = &q->wchan;
- sleepq_lock(c);
- if (all)
- wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
- else
- wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
- sleepq_release(c);
- if (wakeup_swapper)
- kick_proc0();
-}
+typedef struct wait_queue wait_queue_t;
+typedef struct wait_queue_head wait_queue_head_t;
+
+typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);
+
+/*
+ * Many API consumers directly reference these fields and those of
+ * wait_queue_head.
+ */
+struct wait_queue {
+ unsigned int flags; /* always 0 */
+ void *private;
+ wait_queue_func_t *func;
+ struct list_head task_list;
+};
+
+struct wait_queue_head {
+ spinlock_t lock;
+ struct list_head task_list;
+};
+
+/*
+ * This function is referenced by at least one DRM driver, so it may not be
+ * renamed and furthermore must be the default wait queue callback.
+ */
+extern wait_queue_func_t autoremove_wake_function;
+
+#define DEFINE_WAIT(name) \
+ wait_queue_t name = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
+
+#define DECLARE_WAITQUEUE(name, task) \
+ wait_queue_t name = { \
+ .private = task, \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
+ }
-#define wake_up(q) __wake_up(q, 0)
-#define wake_up_nr(q, nr) __wake_up(q, 1)
-#define wake_up_all(q) __wake_up(q, 1)
-#define wake_up_interruptible(q) __wake_up(q, 0)
-#define wake_up_interruptible_nr(q, nr) __wake_up(q, 1)
-#define wake_up_interruptible_all(q, nr) __wake_up(q, 1)
-
-#define wait_event(q, cond) \
-do { \
- void *c = &(q).wchan; \
- if (!(cond)) { \
- for (;;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- break; \
- } \
- sleepq_add(c, NULL, "completion", SLEEPQ_SLEEP, 0); \
- sleepq_wait(c, 0); \
- } \
- } \
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ wait_queue_head_t name = { \
+ .task_list = LINUX_LIST_HEAD_INIT(name.task_list), \
+ }; \
+ MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)
+
+#define init_waitqueue_head(wqh) do { \
+ mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), \
+ NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \
+ INIT_LIST_HEAD(&(wqh)->task_list); \
} while (0)
-#define wait_event_interruptible(q, cond) \
-({ \
- void *c = &(q).wchan; \
- int _error; \
- \
- _error = 0; \
- if (!(cond)) { \
- for (; _error == 0;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- break; \
- } \
- sleepq_add(c, NULL, "completion", \
- SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0); \
- if (sleepq_wait_sig(c, 0)) \
- _error = -ERESTARTSYS; \
- } \
- } \
- -_error; \
+void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);
+
+#define wake_up(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, false)
+#define wake_up_all(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, false)
+#define wake_up_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 1, true)
+#define wake_up_all_locked(wqh) \
+ linux_wake_up(wqh, TASK_NORMAL, 0, true)
+#define wake_up_interruptible(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
+#define wake_up_interruptible_all(wqh) \
+ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
+
+int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
+ unsigned int, spinlock_t *);
+
+/*
+ * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
+ * cond is true after timeout, remaining jiffies (> 0) if cond is true before
+ * timeout.
+ */
+#define __wait_event_common(wqh, cond, timeout, state, lock) ({ \
+ DEFINE_WAIT(__wq); \
+ const int __timeout = (timeout) < 1 ? 1 : (timeout); \
+ int __start = ticks; \
+ int __ret = 0; \
+ \
+ for (;;) { \
+ linux_prepare_to_wait(&(wqh), &__wq, state); \
+ if (cond) { \
+ __ret = 1; \
+ break; \
+ } \
+ __ret = linux_wait_event_common(&(wqh), &__wq, \
+ __timeout, state, lock); \
+ if (__ret != 0) \
+ break; \
+ } \
+ linux_finish_wait(&(wqh), &__wq); \
+ if (__timeout != MAX_SCHEDULE_TIMEOUT) { \
+ if (__ret == -EWOULDBLOCK) \
+ __ret = !!(cond); \
+ else if (__ret != -ERESTARTSYS) { \
+ __ret = __timeout + __start - ticks; \
+ /* range check return value */ \
+ if (__ret < 1) \
+ __ret = 1; \
+ else if (__ret > __timeout) \
+ __ret = __timeout; \
+ } \
+ } \
+ __ret; \
})
-#define wait_event_interruptible_timeout(q, cond, timeout) \
-({ \
- void *c = &(q).wchan; \
- long end = jiffies + timeout; \
- int __ret = 0; \
- int __rc = 0; \
- \
- if (!(cond)) { \
- for (; __rc == 0;) { \
- if (SCHEDULER_STOPPED()) \
- break; \
- sleepq_lock(c); \
- if (cond) { \
- sleepq_release(c); \
- __ret = 1; \
- break; \
- } \
- sleepq_add(c, NULL, "completion", \
- SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0); \
- sleepq_set_timeout(c, linux_timer_jiffies_until(end));\
- __rc = sleepq_timedwait_sig (c, 0); \
- if (__rc != 0) { \
- /* check for timeout or signal. \
- * 0 if the condition evaluated to false\
- * after the timeout elapsed, 1 if the \
- * condition evaluated to true after the\
- * timeout elapsed. \
- */ \
- if (__rc == EWOULDBLOCK) \
- __ret = (cond); \
- else \
- __ret = -ERESTARTSYS; \
- } \
+#define wait_event(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_UNINTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE, \
+ NULL); \
+})
+
+#define wait_event_interruptible(wqh, cond) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+})
+
+#define wait_event_interruptible_timeout(wqh, cond, timeout) ({ \
+ __wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE, \
+ NULL); \
+})
+
+/*
+ * Wait queue is already locked.
+ */
+#define wait_event_interruptible_locked(wqh, cond) ({ \
+ int __ret; \
\
- } \
- } else { \
- /* return remaining jiffies (at least 1) if the \
- * condition evaluated to true before the timeout \
- * elapsed. \
- */ \
- __ret = (end - jiffies); \
- if( __ret < 1 ) \
- __ret = 1; \
- } \
+ spin_unlock(&(wqh).lock); \
+ __ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, NULL); \
+ spin_lock(&(wqh).lock); \
__ret; \
})
+/*
+ * Hold the (locked) spinlock when testing the cond.
+ */
+#define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \
+ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \
+ TASK_INTERRUPTIBLE, &(lock)); \
+})
-static inline int
-waitqueue_active(wait_queue_head_t *q)
+static inline void
+__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
- return 0; /* XXX: not really implemented */
+ list_add(&wq->task_list, &wqh->task_list);
}
-#define DEFINE_WAIT(name) \
- wait_queue_t name = {}
+static inline void
+add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ __add_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
static inline void
-prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
+ list_add_tail(&wq->task_list, &wqh->task_list);
}
static inline void
-finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
+ list_del(&wq->task_list);
}
-#endif /* _LINUX_WAIT_H_ */
+static inline void
+remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
+{
+
+ spin_lock(&wqh->lock);
+ __remove_wait_queue(wqh, wq);
+ spin_unlock(&wqh->lock);
+}
+
+bool linux_waitqueue_active(wait_queue_head_t *);
+
+#define waitqueue_active(wqh) linux_waitqueue_active(wqh)
+
+void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
+void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);
+
+#define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state)
+#define finish_wait(wqh, wq) linux_finish_wait(wqh, wq)
+
+void linux_wake_up_bit(void *, int);
+int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
+void linux_wake_up_atomic_t(atomic_t *);
+int linux_wait_on_atomic_t(atomic_t *, unsigned int);
+
+#define wake_up_bit(word, bit) linux_wake_up_bit(word, bit)
+#define wait_on_bit_timeout(word, bit, state, timeout) \
+ linux_wait_on_bit_timeout(word, bit, state, timeout)
+#define wake_up_atomic_t(a) linux_wake_up_atomic_t(a)
+/*
+ * All existing callers have a cb that just schedule()s. To avoid adding
+ * complexity, just emulate that internally. The prototype is different so that
+ * callers must be manually modified; a cb that does something other than call
+ * schedule() will require special treatment.
+ */
+#define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state)
+
+struct task_struct;
+bool linux_wake_up_state(struct task_struct *, unsigned int);
+
+#define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL)
+#define wake_up_state(task, state) linux_wake_up_state(task, state)
+
+#endif /* _LINUX_WAIT_H_ */
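
To illustrate the return convention documented in the comment above
__wait_event_common(), a hedged usage sketch of the timeout variant follows.
It is not part of this commit: "struct drv_fence" and its members are
hypothetical, and the timeout of hz ticks (roughly one second, since the
LinuxKPI maps jiffies to FreeBSD ticks) is an arbitrary example value.

	#include <linux/wait.h>

	struct drv_fence {		/* hypothetical example state */
		wait_queue_head_t wq;
		int signaled;
	};

	/*
	 * Hypothetical consumer of wait_event_interruptible_timeout().
	 * Return convention per the __wait_event_common() comment:
	 *   -ERESTARTSYS  interrupted by a signal
	 *    0            condition still false when the timeout expired
	 *   >= 1          condition true; value is the remaining jiffies
	 */
	static int
	drv_fence_wait(struct drv_fence *f)
	{
		int ret;

		ret = wait_event_interruptible_timeout(f->wq,
		    f->signaled != 0, hz);
		if (ret == -ERESTARTSYS)
			return (ret);		/* signal */
		if (ret == 0)
			return (-ETIMEDOUT);	/* timed out */
		return (0);			/* fence signaled in time */
	}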