author     Alexander Motin <mav@FreeBSD.org>    2019-02-27 21:29:21 +0000
committer  Alexander Motin <mav@FreeBSD.org>    2019-02-27 21:29:21 +0000
commit     321f819ba54695178e5e9c20b268019748475ec7 (patch)
tree       1f153466285952e991ed41a0a9e8c32bcad423a7 /sys
parent     dec9dcabf8fc669f6da3bfd03ab943d67e01e787 (diff)
download   src-321f819ba54695178e5e9c20b268019748475ec7.tar.gz
           src-321f819ba54695178e5e9c20b268019748475ec7.zip
Refactor command ordering/blocking mechanism in CTL.
Replace the long per-LUN queue of blocked commands, scanned on each
command completion and sometimes even twice, causing up to O(n^2)
processing cost, with much shorter per-command blocked queues, scanned
only when the respective command completes and checking only the
commands before the previous blocker, reducing the cost to O(n).

While there, unblock aborted commands so they "complete" ASAP and are
removed from the OOA queue, instead of wasting time ordering other
commands against them. Aborted commands that were not yet sent for
execution should have no visible side effects, so this is a safe and
easy optimization now, compared to commands already in processing,
which are still a pain.

Together these two optimizations should fix a quite pathological case
where, due to backend slowness, CTL accumulated many thousands of
blocked requests, partially aborted by the initiator and so supposedly
not even existing, but still wasting CTL CPU time.

MFC after:	2 weeks
Sponsored by:	iXsystems, Inc.
Notes: svn path=/head/; revision=344636
Diffstat (limited to 'sys')
-rw-r--r--  sys/cam/ctl/ctl.c                  322
-rw-r--r--  sys/cam/ctl/ctl_frontend_ioctl.c     1
-rw-r--r--  sys/cam/ctl/ctl_io.h                10
-rw-r--r--  sys/cam/ctl/ctl_private.h            1
4 files changed, 174 insertions, 160 deletions
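
Before the diff itself, a minimal userspace sketch of the data-structure
change described above may help. It models the new per-command blocked
queues (a blocker back-pointer plus a blocked_queue TAILQ on each I/O, as
added to struct ctl_io_hdr below) and the O(n) release on completion. The
scaffolding around the queue macros (io_init, io_block_on, io_complete,
the simplified struct) is hypothetical illustration, not CTL's actual API.

#include <stddef.h>
#include <stdio.h>
#include <sys/queue.h>

struct ctl_io {
	int			 id;
	struct ctl_io		*blocker;	/* I/O blocking this one */
	TAILQ_HEAD(, ctl_io)	 blocked_queue;	/* I/Os blocked by this one */
	TAILQ_ENTRY(ctl_io)	 blocked_links;	/* our entry in blocker's queue */
};

static void
io_init(struct ctl_io *io, int id)
{

	io->id = id;
	io->blocker = NULL;
	TAILQ_INIT(&io->blocked_queue);
}

/* Mirror of the CTL_ACTION_BLOCK case: queue 'io' behind its blocker. */
static void
io_block_on(struct ctl_io *io, struct ctl_io *bio)
{

	io->blocker = bio;
	TAILQ_INSERT_TAIL(&bio->blocked_queue, io, blocked_links);
}

/*
 * On completion, only the completing command's own queue is walked, the
 * way ctl_try_unblock_others() does it in the diff; nothing else on the
 * LUN is rescanned.
 */
static void
io_complete(struct ctl_io *bio)
{
	struct ctl_io *io, *next_io;

	for (io = TAILQ_FIRST(&bio->blocked_queue); io != NULL; io = next_io) {
		next_io = TAILQ_NEXT(io, blocked_links);
		TAILQ_REMOVE(&bio->blocked_queue, io, blocked_links);
		io->blocker = NULL;
		/*
		 * The real code re-runs ctl_check_ooa() here and may
		 * re-block 'io' on an earlier command instead.
		 */
		printf("io %d unblocked by completion of io %d\n",
		    io->id, bio->id);
	}
}

int
main(void)
{
	struct ctl_io a, b, c;

	io_init(&a, 1); io_init(&b, 2); io_init(&c, 3);
	io_block_on(&b, &a);	/* b and c both wait on a */
	io_block_on(&c, &a);
	io_complete(&a);	/* releases b and c in O(|blocked_queue|) */
	return (0);
}

Because each blocked command sits on exactly one blocker's TAILQ, removal
and migration are O(1), and a completion touches only the commands it was
actually blocking, rather than the whole per-LUN blocked queue.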
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index a65ff6b8af32..e020c6077968 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -500,8 +500,11 @@ static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
- union ctl_io *starting_io);
-static int ctl_check_blocked(struct ctl_lun *lun);
+ union ctl_io **starting_io);
+static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
+ bool skip);
+static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
+ bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
@@ -2279,6 +2282,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
union ctl_ha_msg msg_info;
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
+ union ctl_io *bio;
uint32_t targ_lun;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
@@ -2337,12 +2341,11 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
#endif
TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
- switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
- (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
- ooa_links))) {
+ bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
+ switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
case CTL_ACTION_BLOCK:
- ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
- TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+ ctsio->io_hdr.blocker = bio;
+ TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
blocked_links);
mtx_unlock(&lun->lun_lock);
break;
@@ -2424,7 +2427,7 @@ ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
#endif
bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
entry->cdb_len = io->scsiio.cdb_len;
- if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
+ if (io->io_hdr.blocker != NULL)
entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
@@ -3889,6 +3892,7 @@ ctl_alloc_io(void *pool_ref)
if (io != NULL) {
io->io_hdr.pool = pool_ref;
CTL_SOFTC(io) = pool->ctl_softc;
+ TAILQ_INIT(&io->io_hdr.blocked_queue);
}
return (io);
}
@@ -3903,6 +3907,7 @@ ctl_alloc_io_nowait(void *pool_ref)
if (io != NULL) {
io->io_hdr.pool = pool_ref;
CTL_SOFTC(io) = pool->ctl_softc;
+ TAILQ_INIT(&io->io_hdr.blocked_queue);
}
return (io);
}
@@ -3934,6 +3939,7 @@ ctl_zero_io(union ctl_io *io)
memset(io, 0, sizeof(*io));
io->io_hdr.pool = pool;
CTL_SOFTC(io) = pool->ctl_softc;
+ TAILQ_INIT(&io->io_hdr.blocked_queue);
}
int
@@ -4696,7 +4702,6 @@ fail:
lun->last_busy = getsbinuptime();
#endif
TAILQ_INIT(&lun->ooa_queue);
- TAILQ_INIT(&lun->blocked_queue);
STAILQ_INIT(&lun->error_list);
lun->ie_reported = 1;
callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
@@ -5870,7 +5875,7 @@ ctl_unmap(struct ctl_scsiio *ctsio)
ptrlen->ptr = (void *)buf;
ptrlen->len = len;
ptrlen->flags = byte2;
- ctl_check_blocked(lun);
+ ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE);
mtx_unlock(&lun->lun_lock);
retval = lun->backend->config_write((union ctl_io *)ctsio);
@@ -10759,6 +10764,14 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
const ctl_serialize_action *serialize_row;
/*
+ * Aborted commands are not going to be executed and may even
+ * not report completion, so we don't care about their order.
+ * Let them complete ASAP to clean the OOA queue.
+ */
+ if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
+ return (CTL_ACTION_SKIP);
+
+ /*
* The initiator attempted multiple untagged commands at the same
* time. Can't do that.
*/
@@ -10888,7 +10901,7 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
*/
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
- union ctl_io *starting_io)
+ union ctl_io **starting_io)
{
union ctl_io *ooa_io;
ctl_action action;
@@ -10901,150 +10914,152 @@ ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
* queue. If starting_io is NULL, we'll just end up returning
* CTL_ACTION_PASS.
*/
- for (ooa_io = starting_io; ooa_io != NULL;
+ for (ooa_io = *starting_io; ooa_io != NULL;
ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
ooa_links)){
-
- /*
- * This routine just checks to see whether
- * cur_blocked is blocked by ooa_io, which is ahead
- * of it in the queue. It doesn't queue/dequeue
- * cur_blocked.
- */
action = ctl_check_for_blockage(lun, pending_io, ooa_io);
- switch (action) {
- case CTL_ACTION_BLOCK:
- case CTL_ACTION_OVERLAP:
- case CTL_ACTION_OVERLAP_TAG:
- case CTL_ACTION_SKIP:
- case CTL_ACTION_ERROR:
+ if (action != CTL_ACTION_PASS) {
+ *starting_io = ooa_io;
return (action);
- break; /* NOTREACHED */
- case CTL_ACTION_PASS:
- break;
- default:
- panic("%s: Invalid action %d\n", __func__, action);
}
}
+ *starting_io = NULL;
return (CTL_ACTION_PASS);
}
/*
- * Assumptions:
- * - An I/O has just completed, and has been removed from the per-LUN OOA
- * queue, so some items on the blocked queue may now be unblocked.
+ * Try to unblock the specified I/O.
+ *
+ * skip parameter allows explicitly skip present blocker of the I/O,
+ * starting from the previous one on OOA queue. It can be used when
+ * we know for sure that the blocker I/O does no longer count.
*/
-static int
-ctl_check_blocked(struct ctl_lun *lun)
+static void
+ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
{
struct ctl_softc *softc = lun->ctl_softc;
- union ctl_io *cur_blocked, *next_blocked;
+ union ctl_io *bio, *obio;
+ const struct ctl_cmd_entry *entry;
+ union ctl_ha_msg msg_info;
+ ctl_action action;
mtx_assert(&lun->lun_lock, MA_OWNED);
- /*
- * Run forward from the head of the blocked queue, checking each
- * entry against the I/Os prior to it on the OOA queue to see if
- * there is still any blockage.
- *
- * We cannot use the TAILQ_FOREACH() macro, because it can't deal
- * with our removing a variable on it while it is traversing the
- * list.
- */
- for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
- cur_blocked != NULL; cur_blocked = next_blocked) {
- union ctl_io *prev_ooa;
- ctl_action action;
+ if (io->io_hdr.blocker == NULL)
+ return;
- next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
- blocked_links);
+ obio = bio = io->io_hdr.blocker;
+ if (skip)
+ bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq,
+ ooa_links);
+ action = ctl_check_ooa(lun, io, &bio);
+ if (action == CTL_ACTION_BLOCK) {
+ /* Still blocked, but may be by different I/O now. */
+ if (bio != obio) {
+ TAILQ_REMOVE(&obio->io_hdr.blocked_queue,
+ &io->io_hdr, blocked_links);
+ TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue,
+ &io->io_hdr, blocked_links);
+ io->io_hdr.blocker = bio;
+ }
+ return;
+ }
- prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
- ctl_ooaq, ooa_links);
+ /* No longer blocked, one way or another. */
+ TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links);
+ io->io_hdr.blocker = NULL;
- /*
- * If cur_blocked happens to be the first item in the OOA
- * queue now, prev_ooa will be NULL, and the action
- * returned will just be CTL_ACTION_PASS.
- */
- action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
+ switch (action) {
+ case CTL_ACTION_OVERLAP:
+ ctl_set_overlapped_cmd(&io->scsiio);
+ goto error;
+ case CTL_ACTION_OVERLAP_TAG:
+ ctl_set_overlapped_tag(&io->scsiio,
+ io->scsiio.tag_num & 0xff);
+ goto error;
+ case CTL_ACTION_PASS:
+ case CTL_ACTION_SKIP:
- switch (action) {
- case CTL_ACTION_BLOCK:
- /* Nothing to do here, still blocked */
- break;
- case CTL_ACTION_OVERLAP:
- case CTL_ACTION_OVERLAP_TAG:
- /*
- * This shouldn't happen! In theory we've already
- * checked this command for overlap...
- */
+ /* Serializing commands from the other SC retire there. */
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
+ (softc->ha_mode != CTL_HA_MODE_XFER)) {
+ io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
+ msg_info.hdr.original_sc = io->io_hdr.remote_io;
+ msg_info.hdr.serializing_sc = io;
+ msg_info.hdr.msg_type = CTL_MSG_R2R;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.hdr), M_NOWAIT);
break;
- case CTL_ACTION_PASS:
- case CTL_ACTION_SKIP: {
- const struct ctl_cmd_entry *entry;
-
- /*
- * The skip case shouldn't happen, this transaction
- * should have never made it onto the blocked queue.
- */
- /*
- * This I/O is no longer blocked, we can remove it
- * from the blocked queue. Since this is a TAILQ
- * (doubly linked list), we can do O(1) removals
- * from any place on the list.
- */
- TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
- blocked_links);
- cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
-
- if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
- (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
- /*
- * Need to send IO back to original side to
- * run
- */
- union ctl_ha_msg msg_info;
-
- cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
- msg_info.hdr.original_sc =
- cur_blocked->io_hdr.remote_io;
- msg_info.hdr.serializing_sc = cur_blocked;
- msg_info.hdr.msg_type = CTL_MSG_R2R;
- ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
- sizeof(msg_info.hdr), M_NOWAIT);
- break;
- }
- entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
+ }
- /*
- * Check this I/O for LUN state changes that may
- * have happened while this command was blocked.
- * The LUN state may have been changed by a command
- * ahead of us in the queue, so we need to re-check
- * for any states that can be caused by SCSI
- * commands.
- */
- if (ctl_scsiio_lun_check(lun, entry,
- &cur_blocked->scsiio) == 0) {
- cur_blocked->io_hdr.flags |=
- CTL_FLAG_IS_WAS_ON_RTR;
- ctl_enqueue_rtr(cur_blocked);
- } else
- ctl_done(cur_blocked);
+ /*
+ * Check this I/O for LUN state changes that may have happened
+ * while this command was blocked. The LUN state may have been
+ * changed by a command ahead of us in the queue.
+ */
+ entry = ctl_get_cmd_entry(&io->scsiio, NULL);
+ if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
+ ctl_done(io);
break;
}
- default:
- /*
- * This probably shouldn't happen -- we shouldn't
- * get CTL_ACTION_ERROR, or anything else.
- */
+
+ io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
+ ctl_enqueue_rtr(io);
+ break;
+ case CTL_ACTION_ERROR:
+ default:
+ ctl_set_internal_failure(&io->scsiio,
+ /*sks_valid*/ 0,
+ /*retry_count*/ 0);
+
+error:
+ /* Serializing commands from the other SC are done here. */
+ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
+ (softc->ha_mode != CTL_HA_MODE_XFER)) {
+ ctl_try_unblock_others(lun, io, TRUE);
+ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
+
+ ctl_copy_sense_data_back(io, &msg_info);
+ msg_info.hdr.original_sc = io->io_hdr.remote_io;
+ msg_info.hdr.serializing_sc = NULL;
+ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
+ ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
+ sizeof(msg_info.scsi), M_WAITOK);
+ ctl_free_io(io);
break;
}
+
+ ctl_done(io);
+ break;
}
+}
- return (CTL_RETVAL_COMPLETE);
+/*
+ * Try to unblock I/Os blocked by the specified I/O.
+ *
+ * skip parameter allows explicitly skip the specified I/O as blocker,
+ * starting from the previous one on the OOA queue. It can be used when
+ * we know for sure that the specified I/O does no longer count (done).
+ * It has to be still on OOA queue though so that we know where to start.
+ */
+static void
+ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip)
+{
+ union ctl_io *io, *next_io;
+
+ mtx_assert(&lun->lun_lock, MA_OWNED);
+
+ for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue);
+ io != NULL; io = next_io) {
+ next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links);
+
+ KASSERT(io->io_hdr.blocker != NULL,
+ ("I/O %p on blocked list without blocker", io));
+ ctl_try_unblock_io(lun, io, skip);
+ }
+ KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue),
+ ("blocked_queue is not empty after skipping %p", bio));
}
/*
@@ -11212,6 +11227,8 @@ ctl_failover_lun(union ctl_io *rio)
if (io->flags & CTL_FLAG_IO_ACTIVE) {
io->flags |= CTL_FLAG_ABORT;
io->flags |= CTL_FLAG_FAILOVER;
+ ctl_try_unblock_io(lun,
+ (union ctl_io *)io, FALSE);
} else { /* This can be only due to DATAMOVE */
io->msg_type = CTL_MSG_DATAMOVE_DONE;
io->flags &= ~CTL_FLAG_DMA_INPROG;
@@ -11219,7 +11236,7 @@ ctl_failover_lun(union ctl_io *rio)
io->port_status = 31340;
ctl_enqueue_isc((union ctl_io *)io);
}
- }
+ } else
/* We are slave */
if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
@@ -11233,23 +11250,19 @@ ctl_failover_lun(union ctl_io *rio)
}
}
} else { /* SERIALIZE modes */
- TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
- next_io) {
- /* We are master */
- if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
- TAILQ_REMOVE(&lun->blocked_queue, io,
- blocked_links);
- io->flags &= ~CTL_FLAG_BLOCKED;
- TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
- ctl_free_io((union ctl_io *)io);
- }
- }
TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
/* We are master */
if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
+ if (io->blocker != NULL) {
+ TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue,
+ io, blocked_links);
+ io->blocker = NULL;
+ }
+ ctl_try_unblock_others(lun, (union ctl_io *)io,
+ TRUE);
TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
ctl_free_io((union ctl_io *)io);
- }
+ } else
/* We are slave */
if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
@@ -11260,7 +11273,6 @@ ctl_failover_lun(union ctl_io *rio)
}
}
}
- ctl_check_blocked(lun);
}
mtx_unlock(&lun->lun_lock);
}
@@ -11270,6 +11282,7 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
struct ctl_lun *lun;
const struct ctl_cmd_entry *entry;
+ union ctl_io *bio;
uint32_t initidx, targ_lun;
int retval = 0;
@@ -11445,12 +11458,11 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
return (retval);
}
- switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
- (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
- ctl_ooaq, ooa_links))) {
+ bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
+ switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
case CTL_ACTION_BLOCK:
- ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
- TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
+ ctsio->io_hdr.blocker = bio;
+ TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
blocked_links);
mtx_unlock(&lun->lun_lock);
return (retval);
@@ -11663,6 +11675,7 @@ ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type)
for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
+ ctl_try_unblock_io(lun, xio, FALSE);
}
/* Clear CA. */
for (i = 0; i < ctl_max_ports; i++) {
@@ -11761,6 +11774,7 @@ ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_NOWAIT);
}
+ ctl_try_unblock_io(lun, xio, FALSE);
}
}
}
@@ -11933,6 +11947,7 @@ ctl_abort_task(union ctl_io *io)
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_NOWAIT);
}
+ ctl_try_unblock_io(lun, xio, FALSE);
}
}
mtx_unlock(&lun->lun_lock);
@@ -12108,8 +12123,8 @@ ctl_handle_isc(union ctl_io *io)
break;
}
mtx_lock(&lun->lun_lock);
+ ctl_try_unblock_others(lun, io, TRUE);
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
- ctl_check_blocked(lun);
mtx_unlock(&lun->lun_lock);
ctl_free_io(io);
break;
@@ -12933,6 +12948,13 @@ ctl_process_done(union ctl_io *io)
}
/*
+ * Run through the blocked queue of this I/O and see if anything
+ * can be unblocked, now that this I/O is done and will be removed.
+ * We need to do it before removal to have OOA position to start.
+ */
+ ctl_try_unblock_others(lun, io, TRUE);
+
+ /*
* Remove this from the OOA queue.
*/
TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
@@ -12942,12 +12964,6 @@ ctl_process_done(union ctl_io *io)
#endif
/*
- * Run through the blocked queue on this LUN and see if anything
- * has become unblocked, now that this transaction is done.
- */
- ctl_check_blocked(lun);
-
- /*
* If the LUN has been invalidated, free it if there is nothing
* left on its OOA queue.
*/
@@ -13102,7 +13118,7 @@ ctl_serseq_done(union ctl_io *io)
return;
mtx_lock(&lun->lun_lock);
io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
- ctl_check_blocked(lun);
+ ctl_try_unblock_others(lun, io, FALSE);
mtx_unlock(&lun->lun_lock);
}
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 221d912714b7..24c624cc38d2 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -620,6 +620,7 @@ ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
memcpy(io, (void *)addr, sizeof(*io));
io->io_hdr.pool = pool_tmp;
CTL_SOFTC(io) = sc_tmp;
+ TAILQ_INIT(&io->io_hdr.blocked_queue);
/*
* No status yet, so make sure the status is set properly.
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index 7dfa45da0f70..e892662d79f2 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -87,7 +87,6 @@ typedef enum {
CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */
CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */
CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */
- CTL_FLAG_BLOCKED = 0x00000200, /* on the blocked queue */
CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */
CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */
CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */
@@ -239,14 +238,13 @@ struct ctl_io_hdr {
#endif /* CTL_TIME_IO */
uint32_t num_dmas; /* Number of DMAs */
union ctl_io *remote_io; /* I/O counterpart on remote HA side */
- void *pad1;
+ union ctl_io *blocker; /* I/O blocking this one */
void *pool; /* I/O pool */
union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */
- void *pad2;
- void *pad3;
+ TAILQ_HEAD(, ctl_io_hdr) blocked_queue; /* I/Os blocked by this one */
STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */
- TAILQ_ENTRY(ctl_io_hdr) ooa_links;
- TAILQ_ENTRY(ctl_io_hdr) blocked_links;
+ TAILQ_ENTRY(ctl_io_hdr) ooa_links; /* ooa_queue links */
+ TAILQ_ENTRY(ctl_io_hdr) blocked_links; /* blocked_queue links */
};
typedef enum {
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index f719988ac626..34d4209ea7f2 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -390,7 +390,6 @@ struct ctl_lun {
sbintime_t last_busy;
#endif
TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue;
- TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
STAILQ_ENTRY(ctl_lun) links;
struct scsi_sense_data **pending_sense;
ctl_ua_type **pending_ua;
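
To close, a compact, hypothetical model of the retry logic that
ctl_try_unblock_io() implements in the diff above: re-scan the OOA queue
backwards from the old blocker (or from its predecessor when 'skip' says
the old blocker no longer counts), then either migrate the command to the
new blocker's queue or release it. The 'conflicts' flag stands in for the
real serialization table consulted by ctl_check_for_blockage(); every
name not taken from the diff is an assumption.

#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

struct io;
TAILQ_HEAD(ooaq, io);			/* the per-LUN OOA queue type */

struct io {
	bool		 conflicts;	/* stand-in for the serialize table */
	struct io	*blocker;
	TAILQ_HEAD(, io) blocked_queue;
	TAILQ_ENTRY(io)	 ooa_links;
	TAILQ_ENTRY(io)	 blocked_links;
};

/* Backward scan: return the nearest earlier I/O that blocks 'io'. */
static struct io *
check_ooa(struct io *io, struct io *start)
{
	struct io *ooa;

	for (ooa = start; ooa != NULL;
	    ooa = TAILQ_PREV(ooa, ooaq, ooa_links)) {
		if (ooa->conflicts)	/* real code: ctl_check_for_blockage() */
			return (ooa);
	}
	return (NULL);
}

static void
try_unblock(struct io *io, bool skip)
{
	struct io *bio, *obio;

	if (io->blocker == NULL)
		return;
	obio = bio = io->blocker;
	if (skip)			/* old blocker no longer counts */
		bio = TAILQ_PREV(bio, ooaq, ooa_links);
	bio = check_ooa(io, bio);
	if (bio != NULL) {
		/* Still blocked, but possibly by an earlier I/O now. */
		if (bio != obio) {
			TAILQ_REMOVE(&obio->blocked_queue, io, blocked_links);
			TAILQ_INSERT_TAIL(&bio->blocked_queue, io,
			    blocked_links);
			io->blocker = bio;
		}
		return;
	}
	/* No longer blocked; real code dispatches or fails the I/O here. */
	TAILQ_REMOVE(&obio->blocked_queue, io, blocked_links);
	io->blocker = NULL;
}

Note the ordering requirement the diff documents in ctl_process_done():
the completing command must still be on the OOA queue while its blocked
queue is walked, because the backward scan starts from its OOA position.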