Diffstat (limited to 'sys/kern/kern_alq.c')
 -rw-r--r--  sys/kern/kern_alq.c  |  674
 1 file changed, 568 insertions, 106 deletions
diff --git a/sys/kern/kern_alq.c b/sys/kern/kern_alq.c
index a4ece799a5b9..98e6de885cfb 100644
--- a/sys/kern/kern_alq.c
+++ b/sys/kern/kern_alq.c
@@ -1,7 +1,13 @@
/*-
* Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
+ * Copyright (c) 2008-2009, Lawrence Stewart <lstewart@freebsd.org>
+ * Copyright (c) 2009-2010, The FreeBSD Foundation
* All rights reserved.
*
+ * Portions of this software were developed at the Centre for Advanced
+ * Internet Architectures, Swinburne University of Technology, Melbourne,
+ * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -27,6 +33,8 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_mac.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -47,16 +55,23 @@ __FBSDID("$FreeBSD$");
/* Async. Logging Queue */
struct alq {
+ char *aq_entbuf; /* Buffer for stored entries */
int aq_entmax; /* Max entries */
int aq_entlen; /* Entry length */
- char *aq_entbuf; /* Buffer for stored entries */
+ int aq_freebytes; /* Bytes available in buffer */
+ int aq_buflen; /* Total length of our buffer */
+ int aq_writehead; /* Location for next write */
+ int aq_writetail; /* Flush starts at this location */
+ int aq_wrapearly; /* # bytes left blank at end of buf */
int aq_flags; /* Queue flags */
+ int aq_waiters; /* Num threads waiting for resources
+ * NB: Used as a wait channel so must
+ * not be first field in the alq struct
+ */
+ struct ale aq_getpost; /* ALE for use by get/post */
struct mtx aq_mtx; /* Queue lock */
struct vnode *aq_vp; /* Open vnode handle */
struct ucred *aq_cred; /* Credentials of the opening thread */
- struct ale *aq_first; /* First ent */
- struct ale *aq_entfree; /* First free ent */
- struct ale *aq_entvalid; /* First ent valid for writing */
LIST_ENTRY(alq) aq_act; /* List of active queues */
LIST_ENTRY(alq) aq_link; /* List of all queues */
};
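
The linked list of fixed-size entries (aq_first/aq_entfree/aq_entvalid) is replaced by a single circular byte buffer tracked by the fields above. The standalone sketch below is illustrative only (not part of the patch); it mirrors the invariants the new fields maintain and the arithmetic alq_getn() uses later in the patch to find contiguous free space.

#include <assert.h>

struct ringstate {
	int buflen;     /* aq_buflen: total size of aq_entbuf */
	int freebytes;  /* aq_freebytes: bytes not holding pending data */
	int writehead;  /* aq_writehead: where the next write begins */
	int writetail;  /* aq_writetail: where the next flush begins */
};

/* Contiguous writable bytes at writehead, as alq_getn() computes them. */
static int
contig_free(const struct ringstate *rs)
{
	if (rs->writehead <= rs->writetail)
		return (rs->freebytes);
	return (rs->buflen - rs->writehead);
}

int
main(void)
{
	/* 100-byte buffer with 30 bytes pending: tail at 10, head at 40. */
	struct ringstate rs = { 100, 70, 40, 10 };

	/* Only bytes 40..99 are usable without wrapping. */
	assert(contig_free(&rs) == 60);
	return (0);
}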
@@ -65,10 +80,14 @@ struct alq {
#define AQ_ACTIVE 0x0002 /* on the active list */
#define AQ_FLUSHING 0x0004 /* doing IO */
#define AQ_SHUTDOWN 0x0008 /* Queue no longer valid */
+#define AQ_ORDERED 0x0010 /* Queue enforces ordered writes */
+#define AQ_LEGACY 0x0020 /* Legacy queue (fixed length writes) */
#define ALQ_LOCK(alq) mtx_lock_spin(&(alq)->aq_mtx)
#define ALQ_UNLOCK(alq) mtx_unlock_spin(&(alq)->aq_mtx)
+#define HAS_PENDING_DATA(alq) ((alq)->aq_freebytes != (alq)->aq_buflen)
+
static MALLOC_DEFINE(M_ALD, "ALD", "ALD");
/*
@@ -95,6 +114,7 @@ static void ald_deactivate(struct alq *);
/* Internal queue functions */
static void alq_shutdown(struct alq *);
+static void alq_destroy(struct alq *);
static int alq_doio(struct alq *);
@@ -180,8 +200,15 @@ ald_daemon(void)
ALD_LOCK();
for (;;) {
- while ((alq = LIST_FIRST(&ald_active)) == NULL)
- msleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
+ while ((alq = LIST_FIRST(&ald_active)) == NULL &&
+ !ald_shutingdown)
+ mtx_sleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);
+
+ /* Don't shutdown until all active ALQs are flushed. */
+ if (ald_shutingdown && alq == NULL) {
+ ALD_UNLOCK();
+ break;
+ }
ALQ_LOCK(alq);
ald_deactivate(alq);
@@ -189,9 +216,11 @@ ald_daemon(void)
needwakeup = alq_doio(alq);
ALQ_UNLOCK(alq);
if (needwakeup)
- wakeup(alq);
+ wakeup_one(alq);
ALD_LOCK();
}
+
+ kproc_exit(0);
}
static void
@@ -200,14 +229,29 @@ ald_shutdown(void *arg, int howto)
struct alq *alq;
ALD_LOCK();
+
+ /* Ensure no new queues can be created. */
ald_shutingdown = 1;
+ /* Shutdown all ALQs prior to terminating the ald_daemon. */
while ((alq = LIST_FIRST(&ald_queues)) != NULL) {
LIST_REMOVE(alq, aq_link);
ALD_UNLOCK();
alq_shutdown(alq);
ALD_LOCK();
}
+
+ /* At this point, all ALQs are flushed and shutdown. */
+
+ /*
+ * Wake ald_daemon so that it exits. It can't make progress
+ * until our mtx_sleep below releases the ald_mtx we hold.
+ */
+ wakeup(&ald_active);
+
+ /* Wait for ald_daemon to exit. */
+ mtx_sleep(ald_proc, &ald_mtx, PWAIT, "aldslp", 0);
+
ALD_UNLOCK();
}
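
The new shutdown path is a two-way handshake: ald_shutdown() flushes every queue, wakes the daemon, then sleeps on the process pointer until ald_daemon() sees ald_shutingdown with no active queues and calls kproc_exit(), which wakes the sleeper. The sketch below is a userland analogue of that rendezvous (illustrative only, not part of the patch), with pthread condition variables standing in for the mtx_sleep()/wakeup() pairs.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;  /* plays &ald_active */
static pthread_cond_t exit_cv = PTHREAD_COND_INITIALIZER;  /* plays ald_proc */
static int shutting_down, daemon_exited;

static void *
daemon_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!shutting_down)                  /* ald_daemon()'s wait-for-work loop */
		pthread_cond_wait(&work_cv, &lock);
	daemon_exited = 1;
	pthread_cond_signal(&exit_cv);          /* the wakeup kproc_exit() provides */
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, daemon_thread, NULL);

	pthread_mutex_lock(&lock);
	shutting_down = 1;                      /* ald_shutingdown = 1 */
	pthread_cond_signal(&work_cv);          /* wakeup(&ald_active) */
	while (!daemon_exited)                  /* mtx_sleep(ald_proc, ...) */
		pthread_cond_wait(&exit_cv, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(td, NULL);
	return (0);
}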
@@ -219,8 +263,22 @@ alq_shutdown(struct alq *alq)
/* Stop any new writers. */
alq->aq_flags |= AQ_SHUTDOWN;
+ /*
+ * If the ALQ isn't active but has unwritten data (possible if
+ * the ALQ_NOACTIVATE flag has been used), explicitly activate the
+ * ALQ here so that the pending data gets flushed by the ald_daemon.
+ */
+ if (!(alq->aq_flags & AQ_ACTIVE) && HAS_PENDING_DATA(alq)) {
+ alq->aq_flags |= AQ_ACTIVE;
+ ALQ_UNLOCK(alq);
+ ALD_LOCK();
+ ald_activate(alq);
+ ALD_UNLOCK();
+ ALQ_LOCK(alq);
+ }
+
/* Drain IO */
- while (alq->aq_flags & (AQ_FLUSHING|AQ_ACTIVE)) {
+ while (alq->aq_flags & AQ_ACTIVE) {
alq->aq_flags |= AQ_WANTED;
msleep_spin(alq, &alq->aq_mtx, "aldclose", 0);
}
@@ -231,6 +289,17 @@ alq_shutdown(struct alq *alq)
crfree(alq->aq_cred);
}
+static void
+alq_destroy(struct alq *alq)
+{
+ /* Drain all pending IO. */
+ alq_shutdown(alq);
+
+ mtx_destroy(&alq->aq_mtx);
+ free(alq->aq_entbuf, M_ALD);
+ free(alq, M_ALD);
+}
+
/*
* Flush all pending data to disk. This operation will block.
*/
@@ -242,46 +311,54 @@ alq_doio(struct alq *alq)
struct vnode *vp;
struct uio auio;
struct iovec aiov[2];
- struct ale *ale;
- struct ale *alstart;
int totlen;
int iov;
int vfslocked;
+ int wrapearly;
+
+ KASSERT((HAS_PENDING_DATA(alq)), ("%s: queue empty!", __func__));
vp = alq->aq_vp;
td = curthread;
totlen = 0;
- iov = 0;
-
- alstart = ale = alq->aq_entvalid;
- alq->aq_entvalid = NULL;
+ iov = 1;
+ wrapearly = alq->aq_wrapearly;
bzero(&aiov, sizeof(aiov));
bzero(&auio, sizeof(auio));
- do {
- if (aiov[iov].iov_base == NULL)
- aiov[iov].iov_base = ale->ae_data;
- aiov[iov].iov_len += alq->aq_entlen;
- totlen += alq->aq_entlen;
- /* Check to see if we're wrapping the buffer */
- if (ale->ae_data + alq->aq_entlen != ale->ae_next->ae_data)
- iov++;
- ale->ae_flags &= ~AE_VALID;
- ale = ale->ae_next;
- } while (ale->ae_flags & AE_VALID);
+ /* Start the write from the location of our buffer tail pointer. */
+ aiov[0].iov_base = alq->aq_entbuf + alq->aq_writetail;
+
+ if (alq->aq_writetail < alq->aq_writehead) {
+ /* Buffer not wrapped. */
+ totlen = aiov[0].iov_len = alq->aq_writehead - alq->aq_writetail;
+ } else if (alq->aq_writehead == 0) {
+ /* Buffer not wrapped (special case to avoid an empty iov). */
+ totlen = aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail -
+ wrapearly;
+ } else {
+ /*
+ * Buffer wrapped, requires 2 aiov entries:
+ * - first is from writetail to end of buffer
+ * - second is from start of buffer to writehead
+ */
+ aiov[0].iov_len = alq->aq_buflen - alq->aq_writetail -
+ wrapearly;
+ iov++;
+ aiov[1].iov_base = alq->aq_entbuf;
+ aiov[1].iov_len = alq->aq_writehead;
+ totlen = aiov[0].iov_len + aiov[1].iov_len;
+ }
alq->aq_flags |= AQ_FLUSHING;
ALQ_UNLOCK(alq);
- if (iov == 2 || aiov[iov].iov_base == NULL)
- iov--;
-
auio.uio_iov = &aiov[0];
auio.uio_offset = 0;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_rw = UIO_WRITE;
- auio.uio_iovcnt = iov + 1;
+ auio.uio_iovcnt = iov;
auio.uio_resid = totlen;
auio.uio_td = td;
@@ -305,8 +382,28 @@ alq_doio(struct alq *alq)
ALQ_LOCK(alq);
alq->aq_flags &= ~AQ_FLUSHING;
- if (alq->aq_entfree == NULL)
- alq->aq_entfree = alstart;
+ /* Adjust writetail as required, taking into account wrapping. */
+ alq->aq_writetail = (alq->aq_writetail + totlen + wrapearly) %
+ alq->aq_buflen;
+ alq->aq_freebytes += totlen + wrapearly;
+
+ /*
+ * If we just flushed part of the buffer which wrapped, reset the
+ * wrapearly indicator.
+ */
+ if (wrapearly)
+ alq->aq_wrapearly = 0;
+
+ /*
+ * If we just flushed the buffer completely, reset indexes to 0 to
+ * minimise buffer wraps.
+ * This is also required to ensure alq_getn() can't wedge itself.
+ */
+ if (!HAS_PENDING_DATA(alq))
+ alq->aq_writehead = alq->aq_writetail = 0;
+
+ KASSERT((alq->aq_writetail >= 0 && alq->aq_writetail < alq->aq_buflen),
+ ("%s: aq_writetail < 0 || aq_writetail >= aq_buflen", __func__));
if (alq->aq_flags & AQ_WANTED) {
alq->aq_flags &= ~AQ_WANTED;
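
In the wrapped case alq_doio() submits two iovecs: the tail-to-end portion of the buffer (minus any wrapearly hole left blank at the end) and the start-to-head portion. The assertions below are an illustrative userland walk-through of that arithmetic, including the tail adjustment performed after the write completes; the numbers are made up.

#include <assert.h>
#include <sys/uio.h>

int
main(void)
{
	char buf[100];
	int buflen = sizeof(buf);
	int writetail = 70, writehead = 20, wrapearly = 8;  /* made-up wrapped state */
	struct iovec aiov[2];
	int totlen;

	/* First iovec: tail up to the blank wrapearly hole at the end. */
	aiov[0].iov_base = buf + writetail;
	aiov[0].iov_len = buflen - writetail - wrapearly;   /* 22 bytes */
	/* Second iovec: start of the buffer up to the head. */
	aiov[1].iov_base = buf;
	aiov[1].iov_len = writehead;                         /* 20 bytes */
	totlen = aiov[0].iov_len + aiov[1].iov_len;
	assert(totlen == 42);

	/* After the write, the tail skips both the data and the hole. */
	writetail = (writetail + totlen + wrapearly) % buflen;
	assert(writetail == writehead);  /* queue now empty; indexes reset to 0 */
	return (0);
}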
@@ -331,27 +428,27 @@ SYSINIT(ald, SI_SUB_LOCK, SI_ORDER_ANY, ald_startup, NULL);
/*
* Create the queue data structure, allocate the buffer, and open the file.
*/
+
int
-alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
- int size, int count)
+alq_open_flags(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
+ int size, int flags)
{
struct thread *td;
struct nameidata nd;
- struct ale *ale;
- struct ale *alp;
struct alq *alq;
- char *bufp;
- int flags;
+ int oflags;
int error;
- int i, vfslocked;
+ int vfslocked;
+
+ KASSERT((size > 0), ("%s: size <= 0", __func__));
*alqp = NULL;
td = curthread;
NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, file, td);
- flags = FWRITE | O_NOFOLLOW | O_CREAT;
+ oflags = FWRITE | O_NOFOLLOW | O_CREAT;
- error = vn_open_cred(&nd, &flags, cmode, 0, cred, NULL);
+ error = vn_open_cred(&nd, &oflags, cmode, 0, cred, NULL);
if (error)
return (error);
@@ -362,110 +459,430 @@ alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
VFS_UNLOCK_GIANT(vfslocked);
alq = malloc(sizeof(*alq), M_ALD, M_WAITOK|M_ZERO);
- alq->aq_entbuf = malloc(count * size, M_ALD, M_WAITOK|M_ZERO);
- alq->aq_first = malloc(sizeof(*ale) * count, M_ALD, M_WAITOK|M_ZERO);
alq->aq_vp = nd.ni_vp;
alq->aq_cred = crhold(cred);
- alq->aq_entmax = count;
- alq->aq_entlen = size;
- alq->aq_entfree = alq->aq_first;
mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET);
- bufp = alq->aq_entbuf;
- ale = alq->aq_first;
- alp = NULL;
-
- /* Match up entries with buffers */
- for (i = 0; i < count; i++) {
- if (alp)
- alp->ae_next = ale;
- ale->ae_data = bufp;
- alp = ale;
- ale++;
- bufp += size;
- }
+ alq->aq_buflen = size;
+ alq->aq_entmax = 0;
+ alq->aq_entlen = 0;
- alp->ae_next = alq->aq_first;
+ alq->aq_freebytes = alq->aq_buflen;
+ alq->aq_entbuf = malloc(alq->aq_buflen, M_ALD, M_WAITOK|M_ZERO);
+ alq->aq_writehead = alq->aq_writetail = 0;
+ if (flags & ALQ_ORDERED)
+ alq->aq_flags |= AQ_ORDERED;
- if ((error = ald_add(alq)) != 0)
+ if ((error = ald_add(alq)) != 0) {
+ alq_destroy(alq);
return (error);
+ }
+
*alqp = alq;
return (0);
}
+int
+alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
+ int size, int count)
+{
+ int ret;
+
+ KASSERT((count >= 0), ("%s: count < 0", __func__));
+
+ if (count > 0) {
+ ret = alq_open_flags(alqp, file, cred, cmode, size*count, 0);
+ if (ret == 0) {
+ (*alqp)->aq_flags |= AQ_LEGACY;
+ (*alqp)->aq_entmax = count;
+ (*alqp)->aq_entlen = size;
+ }
+ } else
+ ret = alq_open_flags(alqp, file, cred, cmode, size, 0);
+
+ return (ret);
+}
+
+
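
A sketch of how a consumer might open queues in the two modes; the file names, sizes, and 0600 cmode are illustrative choices, not taken from the patch. alq_open() keeps the legacy fixed-length-record behaviour, while alq_open_flags() sizes the queue in bytes and accepts flags such as ALQ_ORDERED.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/alq.h>
#include <sys/proc.h>

#define MYLOG_ENTLEN	64	/* hypothetical fixed record size */

static struct alq *fixedq, *varq;

static int
mylog_open(void)
{
	int error;

	/* Legacy mode: room for 1000 records of exactly MYLOG_ENTLEN bytes. */
	error = alq_open(&fixedq, "/var/log/mylog.fixed",
	    curthread->td_ucred, 0600, MYLOG_ENTLEN, 1000);
	if (error != 0)
		return (error);

	/* Variable-length mode: a 64KB buffer with ordered (FIFO) writers. */
	error = alq_open_flags(&varq, "/var/log/mylog.var",
	    curthread->td_ucred, 0600, 64 * 1024, ALQ_ORDERED);
	if (error != 0)
		alq_close(fixedq);

	return (error);
}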
/*
* Copy a new entry into the queue. If the operation would block either
* wait or return an error depending on the value of waitok.
*/
int
-alq_write(struct alq *alq, void *data, int waitok)
+alq_writen(struct alq *alq, void *data, int len, int flags)
{
- struct ale *ale;
+ int activate, copy, ret;
+ void *waitchan;
- if ((ale = alq_get(alq, waitok)) == NULL)
+ KASSERT((len > 0 && len <= alq->aq_buflen),
+ ("%s: len <= 0 || len > aq_buflen", __func__));
+
+ activate = ret = 0;
+ copy = len;
+ waitchan = NULL;
+
+ ALQ_LOCK(alq);
+
+ /*
+ * Fail to perform the write and return EWOULDBLOCK if:
+ * - The message is larger than our underlying buffer.
+ * - The ALQ is being shutdown.
+ * - There is insufficient free space in our underlying buffer
+ * to accept the message and the user can't wait for space.
+ * - There is insufficient free space in our underlying buffer
+ * to accept the message and the alq is inactive due to prior
+ * use of the ALQ_NOACTIVATE flag (which would lead to deadlock).
+ */
+ if (len > alq->aq_buflen ||
+ alq->aq_flags & AQ_SHUTDOWN ||
+ (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) &&
+ HAS_PENDING_DATA(alq))) && alq->aq_freebytes < len)) {
+ ALQ_UNLOCK(alq);
return (EWOULDBLOCK);
+ }
- bcopy(data, ale->ae_data, alq->aq_entlen);
- alq_post(alq, ale);
+ /*
+ * If we want ordered writes and there is already at least one thread
+ * waiting for resources to become available, sleep until we're woken.
+ */
+ if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) {
+ KASSERT(!(flags & ALQ_NOWAIT),
+ ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
+ alq->aq_waiters++;
+ msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqwnord", 0);
+ alq->aq_waiters--;
+ }
- return (0);
+ /*
+ * Either the caller can wait (ALQ_WAITOK) and aq_freebytes < len, in
+ * which case we sleep in the while loop until enough free bytes are
+ * available, or aq_freebytes >= len already and we skip the loop.
+ * If AQ_ORDERED is set, only 1 thread at a time will be in this loop.
+ * Otherwise, multiple threads may be sleeping here competing for ALQ
+ * resources.
+ */
+ while (alq->aq_freebytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) {
+ KASSERT(!(flags & ALQ_NOWAIT),
+ ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
+ alq->aq_flags |= AQ_WANTED;
+ alq->aq_waiters++;
+ if (waitchan)
+ wakeup(waitchan);
+ msleep_spin(alq, &alq->aq_mtx, "alqwnres", 0);
+ alq->aq_waiters--;
+
+ /*
+ * If we're the first thread to wake after an AQ_WANTED wakeup
+ * but there isn't enough free space for us, we're going to loop
+ * and sleep again. If there are other threads waiting in this
+ * loop, schedule a wakeup so that they can see if the space
+ * they require is available.
+ */
+ if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) &&
+ alq->aq_freebytes < len && !(alq->aq_flags & AQ_WANTED))
+ waitchan = alq;
+ else
+ waitchan = NULL;
+ }
+
+ /*
+ * If there are waiters, we need to signal the waiting threads after we
+ * complete our work. The alq ptr is used as a wait channel for threads
+ * requiring resources to be freed up. In the AQ_ORDERED case, threads
+ * are not allowed to concurrently compete for resources in the above
+ * while loop, so we use a different wait channel in this case.
+ */
+ if (alq->aq_waiters > 0) {
+ if (alq->aq_flags & AQ_ORDERED)
+ waitchan = &alq->aq_waiters;
+ else
+ waitchan = alq;
+ } else
+ waitchan = NULL;
+
+ /* Bail if we're shutting down. */
+ if (alq->aq_flags & AQ_SHUTDOWN) {
+ ret = EWOULDBLOCK;
+ goto unlock;
+ }
+
+ /*
+ * If we need to wrap the buffer to accommodate the write,
+ * we'll need 2 calls to bcopy.
+ */
+ if ((alq->aq_buflen - alq->aq_writehead) < len)
+ copy = alq->aq_buflen - alq->aq_writehead;
+
+ /* Copy message (or part thereof if wrap required) to the buffer. */
+ bcopy(data, alq->aq_entbuf + alq->aq_writehead, copy);
+ alq->aq_writehead += copy;
+
+ if (alq->aq_writehead >= alq->aq_buflen) {
+ KASSERT((alq->aq_writehead == alq->aq_buflen),
+ ("%s: alq->aq_writehead (%d) > alq->aq_buflen (%d)",
+ __func__,
+ alq->aq_writehead,
+ alq->aq_buflen));
+ alq->aq_writehead = 0;
+ }
+
+ if (copy != len) {
+ /*
+ * Wrap the buffer by copying the remainder of our message
+ * to the start of the buffer and resetting aq_writehead.
+ */
+ bcopy(((uint8_t *)data)+copy, alq->aq_entbuf, len - copy);
+ alq->aq_writehead = len - copy;
+ }
+
+ KASSERT((alq->aq_writehead >= 0 && alq->aq_writehead < alq->aq_buflen),
+ ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen", __func__));
+
+ alq->aq_freebytes -= len;
+
+ if (!(alq->aq_flags & AQ_ACTIVE) && !(flags & ALQ_NOACTIVATE)) {
+ alq->aq_flags |= AQ_ACTIVE;
+ activate = 1;
+ }
+
+ KASSERT((HAS_PENDING_DATA(alq)), ("%s: queue empty!", __func__));
+
+unlock:
+ ALQ_UNLOCK(alq);
+
+ if (activate) {
+ ALD_LOCK();
+ ald_activate(alq);
+ ALD_UNLOCK();
+ }
+
+ /* NB: We rely on wakeup_one waking threads in a FIFO manner. */
+ if (waitchan != NULL)
+ wakeup_one(waitchan);
+
+ return (ret);
+}
+
+int
+alq_write(struct alq *alq, void *data, int flags)
+{
+ /* Should only be called in fixed length message (legacy) mode. */
+ KASSERT((alq->aq_flags & AQ_LEGACY),
+ ("%s: fixed length write on variable length queue", __func__));
+ return (alq_writen(alq, data, alq->aq_entlen, flags));
}
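
A sketch of the two common write patterns against the hypothetical varq queue from the earlier open example (illustrative only, not part of the patch): a lossy writer that uses ALQ_NOWAIT and counts drops, and a batching writer that queues several records with ALQ_NOACTIVATE and then pushes them out with a single alq_flush().

static unsigned long mylog_drops;	/* hypothetical drop counter */

static void
mylog_event(void *rec, int len)
{
	/* Drop the record rather than sleep if the buffer is full. */
	if (alq_writen(varq, rec, len, ALQ_NOWAIT) == EWOULDBLOCK)
		mylog_drops++;
}

static void
mylog_batch(void **recs, int *lens, int n)
{
	int i;

	/*
	 * Queue the batch without waking the ald_daemon for every record.
	 * Assumes the whole batch fits in the queue's buffer; otherwise the
	 * later writes return EWOULDBLOCK (see the deadlock-avoidance case
	 * in alq_writen() above).
	 */
	for (i = 0; i < n; i++)
		alq_writen(varq, recs[i], lens[i], ALQ_WAITOK | ALQ_NOACTIVATE);

	/* Then flush the whole batch to the vnode in one pass. */
	alq_flush(varq);
}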
+/*
+ * Retrieve a pointer for the ALQ to write directly into, avoiding bcopy.
+ */
struct ale *
-alq_get(struct alq *alq, int waitok)
+alq_getn(struct alq *alq, int len, int flags)
{
- struct ale *ale;
- struct ale *aln;
+ int contigbytes;
+ void *waitchan;
- ale = NULL;
+ KASSERT((len > 0 && len <= alq->aq_buflen),
+ ("%s: len <= 0 || len > alq->aq_buflen", __func__));
+
+ waitchan = NULL;
ALQ_LOCK(alq);
- /* Loop until we get an entry or we're shutting down */
- while ((alq->aq_flags & AQ_SHUTDOWN) == 0 &&
- (ale = alq->aq_entfree) == NULL &&
- (waitok & ALQ_WAITOK)) {
+ /*
+ * Determine the number of free contiguous bytes.
+ * We ensure elsewhere that if aq_writehead == aq_writetail because
+ * the buffer is empty, they will both be set to 0 and therefore
+ * aq_freebytes == aq_buflen and is fully contiguous.
+ * If they are equal and the buffer is not empty, aq_freebytes will
+ * be 0 indicating the buffer is full.
+ */
+ if (alq->aq_writehead <= alq->aq_writetail)
+ contigbytes = alq->aq_freebytes;
+ else {
+ contigbytes = alq->aq_buflen - alq->aq_writehead;
+
+ if (contigbytes < len) {
+ /*
+ * Insufficient space at end of buffer to handle a
+ * contiguous write. Wrap early if there's space at
+ * the beginning. This will leave a hole at the end
+ * of the buffer which we will have to skip over when
+ * flushing the buffer to disk.
+ */
+ if (alq->aq_writetail >= len || flags & ALQ_WAITOK) {
+ /* Keep track of # bytes left blank. */
+ alq->aq_wrapearly = contigbytes;
+ /* Do the wrap and adjust counters. */
+ contigbytes = alq->aq_freebytes =
+ alq->aq_writetail;
+ alq->aq_writehead = 0;
+ }
+ }
+ }
+
+ /*
+ * Return a NULL ALE if:
+ * - The message is larger than our underlying buffer.
+ * - The ALQ is being shutdown.
+ * - There is insufficient free space in our underlying buffer
+ * to accept the message and the user can't wait for space.
+ * - There is insufficient free space in our underlying buffer
+ * to accept the message and the alq is inactive due to prior
+ * use of the ALQ_NOACTIVATE flag (which would lead to deadlock).
+ */
+ if (len > alq->aq_buflen ||
+ alq->aq_flags & AQ_SHUTDOWN ||
+ (((flags & ALQ_NOWAIT) || (!(alq->aq_flags & AQ_ACTIVE) &&
+ HAS_PENDING_DATA(alq))) && contigbytes < len)) {
+ ALQ_UNLOCK(alq);
+ return (NULL);
+ }
+
+ /*
+ * If we want ordered writes and there is already at least one thread
+ * waiting for resources to become available, sleep until we're woken.
+ */
+ if (alq->aq_flags & AQ_ORDERED && alq->aq_waiters > 0) {
+ KASSERT(!(flags & ALQ_NOWAIT),
+ ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
+ alq->aq_waiters++;
+ msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqgnord", 0);
+ alq->aq_waiters--;
+ }
+
+ /*
+ * Either the caller can wait (ALQ_WAITOK) and contigbytes < len, in
+ * which case we sleep in the while loop until enough contiguous free
+ * bytes are available, or contigbytes >= len already and we skip the
+ * loop. If AQ_ORDERED is set, only 1 thread at a time will be in this
+ * loop. Otherwise, multiple threads may be sleeping here competing
+ * for ALQ resources.
+ */
+ while (contigbytes < len && !(alq->aq_flags & AQ_SHUTDOWN)) {
+ KASSERT(!(flags & ALQ_NOWAIT),
+ ("%s: ALQ_NOWAIT set but incorrectly ignored!", __func__));
alq->aq_flags |= AQ_WANTED;
- msleep_spin(alq, &alq->aq_mtx, "alqget", 0);
+ alq->aq_waiters++;
+ if (waitchan)
+ wakeup(waitchan);
+ msleep_spin(alq, &alq->aq_mtx, "alqgnres", 0);
+ alq->aq_waiters--;
+
+ if (alq->aq_writehead <= alq->aq_writetail)
+ contigbytes = alq->aq_freebytes;
+ else
+ contigbytes = alq->aq_buflen - alq->aq_writehead;
+
+ /*
+ * If we're the first thread to wake after an AQ_WANTED wakeup
+ * but there isn't enough free space for us, we're going to loop
+ * and sleep again. If there are other threads waiting in this
+ * loop, schedule a wakeup so that they can see if the space
+ * they require is available.
+ */
+ if (alq->aq_waiters > 0 && !(alq->aq_flags & AQ_ORDERED) &&
+ contigbytes < len && !(alq->aq_flags & AQ_WANTED))
+ waitchan = alq;
+ else
+ waitchan = NULL;
}
- if (ale != NULL) {
- aln = ale->ae_next;
- if ((aln->ae_flags & AE_VALID) == 0)
- alq->aq_entfree = aln;
+ /*
+ * If there are waiters, we need to signal the waiting threads after we
+ * complete our work. The alq ptr is used as a wait channel for threads
+ * requiring resources to be freed up. In the AQ_ORDERED case, threads
+ * are not allowed to concurrently compete for resources in the above
+ * while loop, so we use a different wait channel in this case.
+ */
+ if (alq->aq_waiters > 0) {
+ if (alq->aq_flags & AQ_ORDERED)
+ waitchan = &alq->aq_waiters;
else
- alq->aq_entfree = NULL;
+ waitchan = alq;
} else
+ waitchan = NULL;
+
+ /* Bail if we're shutting down. */
+ if (alq->aq_flags & AQ_SHUTDOWN) {
ALQ_UNLOCK(alq);
+ if (waitchan != NULL)
+ wakeup_one(waitchan);
+ return (NULL);
+ }
+
+ /*
+ * If we are here, we have a contiguous number of bytes >= len
+ * available in our buffer starting at aq_writehead.
+ */
+ alq->aq_getpost.ae_data = alq->aq_entbuf + alq->aq_writehead;
+ alq->aq_getpost.ae_bytesused = len;
+ return (&alq->aq_getpost);
+}
- return (ale);
+struct ale *
+alq_get(struct alq *alq, int flags)
+{
+ /* Should only be called in fixed length message (legacy) mode. */
+ KASSERT((alq->aq_flags & AQ_LEGACY),
+ ("%s: fixed length get on variable length queue", __func__));
+ return (alq_getn(alq, alq->aq_entlen, flags));
}
void
-alq_post(struct alq *alq, struct ale *ale)
+alq_post_flags(struct alq *alq, struct ale *ale, int flags)
{
int activate;
+ void *waitchan;
- ale->ae_flags |= AE_VALID;
+ activate = 0;
- if (alq->aq_entvalid == NULL)
- alq->aq_entvalid = ale;
+ if (ale->ae_bytesused > 0) {
+ if (!(alq->aq_flags & AQ_ACTIVE) &&
+ !(flags & ALQ_NOACTIVATE)) {
+ alq->aq_flags |= AQ_ACTIVE;
+ activate = 1;
+ }
- if ((alq->aq_flags & AQ_ACTIVE) == 0) {
- alq->aq_flags |= AQ_ACTIVE;
- activate = 1;
+ alq->aq_writehead += ale->ae_bytesused;
+ alq->aq_freebytes -= ale->ae_bytesused;
+
+ /* Wrap aq_writehead if we filled to the end of the buffer. */
+ if (alq->aq_writehead == alq->aq_buflen)
+ alq->aq_writehead = 0;
+
+ KASSERT((alq->aq_writehead >= 0 &&
+ alq->aq_writehead < alq->aq_buflen),
+ ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen",
+ __func__));
+
+ KASSERT((HAS_PENDING_DATA(alq)), ("%s: queue empty!", __func__));
+ }
+
+ /*
+ * If there are waiters, we need to signal the waiting threads after we
+ * complete our work. The alq ptr is used as a wait channel for threads
+ * requiring resources to be freed up. In the AQ_ORDERED case, threads
+ * are not allowed to concurrently compete for resources in the
+ * alq_getn() while loop, so we use a different wait channel in this case.
+ */
+ if (alq->aq_waiters > 0) {
+ if (alq->aq_flags & AQ_ORDERED)
+ waitchan = &alq->aq_waiters;
+ else
+ waitchan = alq;
} else
- activate = 0;
+ waitchan = NULL;
ALQ_UNLOCK(alq);
+
if (activate) {
ALD_LOCK();
ald_activate(alq);
ALD_UNLOCK();
}
+
+ /* NB: We rely on wakeup_one waking threads in a FIFO manner. */
+ if (waitchan != NULL)
+ wakeup_one(waitchan);
}
void
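
The get/post pair gives writers a path that avoids the bcopy() inside alq_writen(): alq_getn() reserves contiguous space and returns a pointer directly into the queue's buffer (with the queue's spin lock still held), the caller builds its record in place, and alq_post_flags() commits the bytes actually used and drops the lock. A sketch against the hypothetical varq queue (illustrative only):

static int
mylog_event_direct(const char *src, int len)
{
	struct ale *ale;

	/* Reserve len contiguous bytes; fail rather than sleep. */
	if ((ale = alq_getn(varq, len, ALQ_NOWAIT)) == NULL)
		return (EWOULDBLOCK);

	/*
	 * Build the record directly into the queue's buffer. NB: the queue's
	 * spin lock is held between alq_getn() and alq_post_flags(), so the
	 * caller must not sleep here.
	 */
	bcopy(src, ale->ae_data, len);
	/* Record how much of the reservation was actually used... */
	ale->ae_bytesused = len;
	/* ...and commit it, waking the ald_daemon if required. */
	alq_post_flags(varq, ale, 0);

	return (0);
}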
@@ -475,16 +892,24 @@ alq_flush(struct alq *alq)
ALD_LOCK();
ALQ_LOCK(alq);
- if (alq->aq_flags & AQ_ACTIVE) {
- ald_deactivate(alq);
+
+ /*
+ * Pull the lever iff there is data to flush and we're
+ * not already in the middle of a flush operation.
+ */
+ if (HAS_PENDING_DATA(alq) && !(alq->aq_flags & AQ_FLUSHING)) {
+ if (alq->aq_flags & AQ_ACTIVE)
+ ald_deactivate(alq);
+
ALD_UNLOCK();
needwakeup = alq_doio(alq);
} else
ALD_UNLOCK();
+
ALQ_UNLOCK(alq);
if (needwakeup)
- wakeup(alq);
+ wakeup_one(alq);
}
/*
@@ -493,20 +918,57 @@ alq_flush(struct alq *alq)
void
alq_close(struct alq *alq)
{
- /*
- * If we're already shuting down someone else will flush and close
- * the vnode.
- */
- if (ald_rem(alq) != 0)
- return;
+ /* Only flush and destroy alq if not already shutting down. */
+ if (ald_rem(alq) == 0)
+ alq_destroy(alq);
+}
- /*
- * Drain all pending IO.
- */
- alq_shutdown(alq);
+static int
+alq_load_handler(module_t mod, int what, void *arg)
+{
+ int ret;
+
+ ret = 0;
- mtx_destroy(&alq->aq_mtx);
- free(alq->aq_first, M_ALD);
- free(alq->aq_entbuf, M_ALD);
- free(alq, M_ALD);
+ switch (what) {
+ case MOD_LOAD:
+ case MOD_SHUTDOWN:
+ break;
+
+ case MOD_QUIESCE:
+ ALD_LOCK();
+ /* Only allow unload if there are no open queues. */
+ if (LIST_FIRST(&ald_queues) == NULL) {
+ ald_shutingdown = 1;
+ ALD_UNLOCK();
+ ald_shutdown(NULL, 0);
+ mtx_destroy(&ald_mtx);
+ } else {
+ ALD_UNLOCK();
+ ret = EBUSY;
+ }
+ break;
+
+ case MOD_UNLOAD:
+ /* If MOD_QUIESCE failed we must fail here too. */
+ if (ald_shutingdown == 0)
+ ret = EBUSY;
+ break;
+
+ default:
+ ret = EINVAL;
+ break;
+ }
+
+ return (ret);
}
+
+static moduledata_t alq_mod =
+{
+ "alq",
+ alq_load_handler,
+ NULL
+};
+
+DECLARE_MODULE(alq, alq_mod, SI_SUB_SMP, SI_ORDER_ANY);
+MODULE_VERSION(alq, 1);