author     Alexander Motin <mav@FreeBSD.org>    2021-02-27 15:14:05 +0000
committer  Alexander Motin <mav@FreeBSD.org>    2021-03-15 02:46:00 +0000
commit     7b4859b4a26a1725f8e09bc6b8ae45007a8c43b4 (patch)
tree       f76049939fa6f69dc497be9ede56b8612d76f9f8
parent     6469aab051dee71c88e27aa906c18efa09e77189 (diff)
Micro-optimize OOA queue processing.
- Move ctl_get_cmd_entry() calls from every OOA traversal to the point
  where the requests are first inserted, storing seridx in struct
  ctl_scsiio.
- Move some checks out of the loop in ctl_check_ooa().
- Replace checks for errors that cannot happen with asserts.
- Transpose ctl_serialize_table, so that any OOA traversal accesses
  only one row (cache line).  Compact it from enum to uint8_t.
- Optimize static branch predictions in the hottest places.

Due to its O(n) nature on deep LUN queues this can be the hottest code
path in CTL, and the additional 20% of IOPS I see in some 4KB I/O tests
is good to have in reserve.  About 50% of the CPU time here, according
to the profiles, is now spent in two memory accesses per traversed
request in the OOA queue.

Sponsored by: iXsystems, Inc.
MFC after: 2 weeks

(cherry picked from commit 9d9fd8b79f0ebe59f791c8225fa01ab59858b7b5)
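For orientation before the diff, here is a minimal standalone sketch of the
first, fourth and fifth points above (the struct, the helper name and the
"0 means pass" encoding are invented for illustration and are not the actual
sys/cam/ctl definitions): the serialization index is cached once when a
request is inserted, and every OOA traversal then walks a single pre-selected
row of the transposed table, with a branch hint on the rare non-pass outcome.

/* Hypothetical, simplified sketch -- not the actual CTL code. */
#include <stddef.h>
#include <stdint.h>

#ifndef __predict_false			/* FreeBSD's static branch hint */
#define __predict_false(x)	__builtin_expect((x) != 0, 0)
#endif

#define SERIDX_COUNT	14

typedef uint8_t ser_action;		/* compacted from an enum; 0 = pass */

/* Row: new (pending) command.  Column: older command already in the OOA. */
static const ser_action ser_table[SERIDX_COUNT][SERIDX_COUNT];

struct request {
	uint8_t		seridx;		/* cached once, at insertion time */
	struct request	*next;		/* next older request in the queue */
};

static ser_action
check_ooa(const struct request *pending, const struct request *oldest)
{
	/* Select the row once; the whole traversal stays inside it. */
	const ser_action *row = ser_table[pending->seridx];
	const struct request *ooa;

	for (ooa = oldest; ooa != NULL; ooa = ooa->next) {
		/* Two memory reads per traversed request: ooa->seridx, row[]. */
		ser_action act = row[ooa->seridx];

		if (__predict_false(act != 0))
			return (act);
	}
	return (0);			/* nothing ahead blocks: pass */
}

In the diff below, the caching step corresponds to the new ctsio->seridx
assignments in ctl_serialize_other_sc_cmd() and ctl_validate_command(), and
the row selection to the serialize_row argument added to
ctl_check_for_blockage().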
-rw-r--r--  sys/cam/ctl/ctl.c            238
-rw-r--r--  sys/cam/ctl/ctl_io.h           2
-rw-r--r--  sys/cam/ctl/ctl_private.h     18
-rw-r--r--  sys/cam/ctl/ctl_ser_table.c   40
4 files changed, 134 insertions, 164 deletions
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 07331091fc54..46f044814d3a 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -499,9 +499,10 @@ static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
bool seq);
-static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
+static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
- union ctl_io *pending_io, union ctl_io *ooa_io);
+ union ctl_io *pending_io, const uint8_t *serialize_row,
+ union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
@@ -2313,6 +2314,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
}
entry = ctl_get_cmd_entry(ctsio, NULL);
+ ctsio->seridx = entry->seridx;
if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
mtx_unlock(&lun->lun_lock);
goto badjuju;
@@ -2333,12 +2335,6 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
- case CTL_ACTION_BLOCK:
- ctsio->io_hdr.blocker = bio;
- TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
- blocked_links);
- mtx_unlock(&lun->lun_lock);
- break;
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
if (softc->ha_mode == CTL_HA_MODE_XFER) {
@@ -2357,6 +2353,12 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
sizeof(msg_info.hdr), M_WAITOK);
}
break;
+ case CTL_ACTION_BLOCK:
+ ctsio->io_hdr.blocker = bio;
+ TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
+ blocked_links);
+ mtx_unlock(&lun->lun_lock);
+ break;
case CTL_ACTION_OVERLAP:
LIST_REMOVE(&ctsio->io_hdr, ooa_links);
mtx_unlock(&lun->lun_lock);
@@ -2366,14 +2368,6 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
LIST_REMOVE(&ctsio->io_hdr, ooa_links);
mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
- goto badjuju;
- case CTL_ACTION_ERROR:
- default:
- LIST_REMOVE(&ctsio->io_hdr, ooa_links);
- mtx_unlock(&lun->lun_lock);
-
- ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
- /*retry_count*/ 0);
badjuju:
ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
msg_info.hdr.original_sc = ctsio->io_hdr.remote_io;
@@ -2383,6 +2377,8 @@ badjuju:
sizeof(msg_info.scsi), M_WAITOK);
ctl_free_io((union ctl_io *)ctsio);
break;
+ default:
+ __assert_unreachable();
}
}
@@ -10831,8 +10827,9 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
break;
}
default:
+ *lba = 0;
+ *len = UINT64_MAX;
return (1);
- break; /* NOTREACHED */
}
return (0);
@@ -10866,7 +10863,7 @@ ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
/* If not UNMAP -- go other way. */
if (io->scsiio.cdb[0] != UNMAP)
- return (CTL_ACTION_ERROR);
+ return (CTL_ACTION_SKIP);
/* If UNMAP without data -- block and wait for data. */
ptrlen = (struct ctl_ptr_len_flags *)
@@ -10894,33 +10891,34 @@ ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
uint64_t len1, len2;
int retval;
- if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
- return (CTL_ACTION_ERROR);
+ retval = ctl_get_lba_len(io2, &lba2, &len2);
+ KASSERT(retval == 0, ("ctl_get_lba_len() error"));
retval = ctl_extent_check_unmap(io1, lba2, len2);
- if (retval != CTL_ACTION_ERROR)
+ if (retval != CTL_ACTION_SKIP)
return (retval);
- if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
- return (CTL_ACTION_ERROR);
+ retval = ctl_get_lba_len(io1, &lba1, &len1);
+ KASSERT(retval == 0, ("ctl_get_lba_len() error"));
- if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
+ if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE))
seq = FALSE;
return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}
static ctl_action
-ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
+ctl_seq_check(union ctl_io *io1, union ctl_io *io2)
{
uint64_t lba1, lba2;
uint64_t len1, len2;
+ int retval;
if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
return (CTL_ACTION_PASS);
- if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
- return (CTL_ACTION_ERROR);
- if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
- return (CTL_ACTION_ERROR);
+ retval = ctl_get_lba_len(io1, &lba1, &len1);
+ KASSERT(retval == 0, ("ctl_get_lba_len() error"));
+ retval = ctl_get_lba_len(io2, &lba2, &len2);
+ KASSERT(retval == 0, ("ctl_get_lba_len() error"));
if (lba1 + len1 == lba2)
return (CTL_ACTION_BLOCK);
@@ -10929,25 +10927,15 @@ ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
- union ctl_io *ooa_io)
+ const uint8_t *serialize_row, union ctl_io *ooa_io)
{
- const struct ctl_cmd_entry *pending_entry, *ooa_entry;
- const ctl_serialize_action *serialize_row;
-
- /*
- * Aborted commands are not going to be executed and may even
- * not report completion, so we don't care about their order.
- * Let them complete ASAP to clean the OOA queue.
- */
- if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
- return (CTL_ACTION_SKIP);
/*
* The initiator attempted multiple untagged commands at the same
* time. Can't do that.
*/
- if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
- && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
+ && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
&& ((pending_io->io_hdr.nexus.targ_port ==
ooa_io->io_hdr.nexus.targ_port)
&& (pending_io->io_hdr.nexus.initid ==
@@ -10967,9 +10955,9 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
* command with the same tag number as long as the previous
* instance of this tag number has been aborted somehow.
*/
- if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
- && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
- && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
+ if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
+ && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
+ && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
&& ((pending_io->io_hdr.nexus.targ_port ==
ooa_io->io_hdr.nexus.targ_port)
&& (pending_io->io_hdr.nexus.initid ==
@@ -10992,75 +10980,47 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
*
* XXX KDM check for other types of blockage first??
*/
- if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
+ if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
return (CTL_ACTION_PASS);
/*
- * Ordered tags have to block until all items ahead of them
- * have completed. If we get called with an ordered tag, we always
- * block, if something else is ahead of us in the queue.
- */
- if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
- return (CTL_ACTION_BLOCK);
-
- /*
* Simple tags get blocked until all head of queue and ordered tags
* ahead of them have completed. I'm lumping untagged commands in
* with simple tags here. XXX KDM is that the right thing to do?
*/
- if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
- || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
- && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
- || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
+ if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) ||
+ __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
return (CTL_ACTION_BLOCK);
- pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
- KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
- ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
- __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
- pending_io->scsiio.cdb[1], pending_io));
- ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
- if (ooa_entry->seridx == CTL_SERIDX_INVLD)
- return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
- KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
- ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
- __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
- ooa_io->scsiio.cdb[1], ooa_io));
-
- serialize_row = ctl_serialize_table[ooa_entry->seridx];
-
- switch (serialize_row[pending_entry->seridx]) {
- case CTL_SER_BLOCK:
- return (CTL_ACTION_BLOCK);
- case CTL_SER_EXTENT:
- return (ctl_extent_check(ooa_io, pending_io,
- (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
- case CTL_SER_EXTENTOPT:
- if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
- SCP_QUEUE_ALG_UNRESTRICTED)
- return (ctl_extent_check(ooa_io, pending_io,
- (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
+ /* Unsupported command in OOA queue. */
+ if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD))
return (CTL_ACTION_PASS);
- case CTL_SER_EXTENTSEQ:
+
+ switch (serialize_row[ooa_io->scsiio.seridx]) {
+ case CTL_SER_SEQ:
if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
- return (ctl_extent_check_seq(ooa_io, pending_io));
- return (CTL_ACTION_PASS);
+ return (ctl_seq_check(ooa_io, pending_io));
+ /* FALLTHROUGH */
case CTL_SER_PASS:
return (CTL_ACTION_PASS);
+ case CTL_SER_EXTENTOPT:
+ if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) ==
+ SCP_QUEUE_ALG_UNRESTRICTED)
+ return (CTL_ACTION_PASS);
+ /* FALLTHROUGH */
+ case CTL_SER_EXTENT:
+ return (ctl_extent_check(ooa_io, pending_io,
+ (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
case CTL_SER_BLOCKOPT:
- if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
+ if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) ==
SCP_QUEUE_ALG_UNRESTRICTED)
- return (CTL_ACTION_BLOCK);
- return (CTL_ACTION_PASS);
- case CTL_SER_SKIP:
- return (CTL_ACTION_SKIP);
+ return (CTL_ACTION_PASS);
+ /* FALLTHROUGH */
+ case CTL_SER_BLOCK:
+ return (CTL_ACTION_BLOCK);
default:
- panic("%s: Invalid serialization value %d for %d => %d",
- __func__, serialize_row[pending_entry->seridx],
- pending_entry->seridx, ooa_entry->seridx);
+ __assert_unreachable();
}
-
- return (CTL_ACTION_ERROR);
}
/*
@@ -11073,20 +11033,41 @@ static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
union ctl_io **starting_io)
{
- union ctl_io *ooa_io;
+ union ctl_io *ooa_io = *starting_io;
+ const uint8_t *serialize_row;
ctl_action action;
mtx_assert(&lun->lun_lock, MA_OWNED);
/*
+ * Aborted commands are not going to be executed and may even
+ * not report completion, so we don't care about their order.
+ * Let them complete ASAP to clean the OOA queue.
+ */
+ if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT))
+ return (CTL_ACTION_SKIP);
+
+ /*
+ * Ordered tags have to block until all items ahead of them have
+ * completed. If we get called with an ordered tag, we always
+ * block, if something else is ahead of us in the queue.
+ */
+ if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) &&
+ (ooa_io != NULL))
+ return (CTL_ACTION_BLOCK);
+
+ serialize_row = ctl_serialize_table[pending_io->scsiio.seridx];
+
+ /*
* Run back along the OOA queue, starting with the current
* blocked I/O and going through every I/O before it on the
* queue. If starting_io is NULL, we'll just end up returning
* CTL_ACTION_PASS.
*/
- for (ooa_io = *starting_io; ooa_io != NULL;
+ for (; ooa_io != NULL;
ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) {
- action = ctl_check_for_blockage(lun, pending_io, ooa_io);
+ action = ctl_check_for_blockage(lun, pending_io, serialize_row,
+ ooa_io);
if (action != CTL_ACTION_PASS) {
*starting_io = ooa_io;
return (action);
@@ -11139,13 +11120,6 @@ ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
io->io_hdr.blocker = NULL;
switch (action) {
- case CTL_ACTION_OVERLAP:
- ctl_set_overlapped_cmd(&io->scsiio);
- goto error;
- case CTL_ACTION_OVERLAP_TAG:
- ctl_set_overlapped_tag(&io->scsiio,
- io->scsiio.tag_num & 0xff);
- goto error;
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
@@ -11175,12 +11149,14 @@ ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
ctl_enqueue_rtr(io);
break;
- case CTL_ACTION_ERROR:
default:
- ctl_set_internal_failure(&io->scsiio,
- /*sks_valid*/ 0,
- /*retry_count*/ 0);
-
+ __assert_unreachable();
+ case CTL_ACTION_OVERLAP:
+ ctl_set_overlapped_cmd(&io->scsiio);
+ goto error;
+ case CTL_ACTION_OVERLAP_TAG:
+ ctl_set_overlapped_tag(&io->scsiio,
+ io->scsiio.tag_num & 0xff);
error:
/* Serializing commands from the other SC are done here. */
if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
@@ -11392,8 +11368,8 @@ ctl_failover_lun(union ctl_io *rio)
/* We are master */
if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
if (io->flags & CTL_FLAG_IO_ACTIVE) {
- io->flags |= CTL_FLAG_ABORT;
- io->flags |= CTL_FLAG_FAILOVER;
+ io->flags |= CTL_FLAG_ABORT |
+ CTL_FLAG_FAILOVER;
ctl_try_unblock_io(lun,
(union ctl_io *)io, FALSE);
} else { /* This can be only due to DATAMOVE */
@@ -11628,18 +11604,18 @@ ctl_scsiio_precheck(struct ctl_scsiio *ctsio)
bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links);
switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
- case CTL_ACTION_BLOCK:
- ctsio->io_hdr.blocker = bio;
- TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
- blocked_links);
- mtx_unlock(&lun->lun_lock);
- break;
case CTL_ACTION_PASS:
case CTL_ACTION_SKIP:
ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
mtx_unlock(&lun->lun_lock);
ctl_enqueue_rtr((union ctl_io *)ctsio);
break;
+ case CTL_ACTION_BLOCK:
+ ctsio->io_hdr.blocker = bio;
+ TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
+ blocked_links);
+ mtx_unlock(&lun->lun_lock);
+ break;
case CTL_ACTION_OVERLAP:
mtx_unlock(&lun->lun_lock);
ctl_set_overlapped_cmd(ctsio);
@@ -11650,14 +11626,8 @@ ctl_scsiio_precheck(struct ctl_scsiio *ctsio)
ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
ctl_done((union ctl_io *)ctsio);
break;
- case CTL_ACTION_ERROR:
default:
- mtx_unlock(&lun->lun_lock);
- ctl_set_internal_failure(ctsio,
- /*sks_valid*/ 0,
- /*retry_count*/ 0);
- ctl_done((union ctl_io *)ctsio);
- break;
+ __assert_unreachable();
}
}
@@ -11686,6 +11656,7 @@ ctl_validate_command(struct ctl_scsiio *ctsio)
uint8_t diff;
entry = ctl_get_cmd_entry(ctsio, &sa);
+ ctsio->seridx = entry->seridx;
if (entry->execute == NULL) {
if (sa)
ctl_set_invalid_field(ctsio,
@@ -13319,10 +13290,15 @@ ctl_serseq_done(union ctl_io *io)
if (lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
return;
- mtx_lock(&lun->lun_lock);
- io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
- ctl_try_unblock_others(lun, io, FALSE);
- mtx_unlock(&lun->lun_lock);
+
+ /* This is racy, but should not be a problem. */
+ if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
+ mtx_lock(&lun->lun_lock);
+ io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
+ ctl_try_unblock_others(lun, io, FALSE);
+ mtx_unlock(&lun->lun_lock);
+ } else
+ io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
}
void
diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h
index b275d80ad4a0..714ce12da95a 100644
--- a/sys/cam/ctl/ctl_io.h
+++ b/sys/cam/ctl/ctl_io.h
@@ -325,7 +325,7 @@ struct ctl_scsiio {
struct scsi_sense_data sense_data; /* sense data */
uint8_t sense_len; /* Returned sense length */
uint8_t scsi_status; /* SCSI status byte */
- uint8_t sense_residual; /* Unused. */
+ uint8_t seridx; /* Serialization index. */
uint8_t priority; /* Command priority */
uint32_t residual; /* Unused */
uint32_t tag_num; /* tag number */
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index 8940babd4904..a891041e8f50 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -65,22 +65,20 @@ struct ctl_io_pool {
};
typedef enum {
- CTL_SER_BLOCK,
- CTL_SER_BLOCKOPT,
- CTL_SER_EXTENT,
- CTL_SER_EXTENTOPT,
- CTL_SER_EXTENTSEQ,
+ CTL_SER_SEQ,
CTL_SER_PASS,
- CTL_SER_SKIP
+ CTL_SER_EXTENTOPT,
+ CTL_SER_EXTENT,
+ CTL_SER_BLOCKOPT,
+ CTL_SER_BLOCK,
} ctl_serialize_action;
typedef enum {
- CTL_ACTION_BLOCK,
- CTL_ACTION_OVERLAP,
- CTL_ACTION_OVERLAP_TAG,
CTL_ACTION_PASS,
CTL_ACTION_SKIP,
- CTL_ACTION_ERROR
+ CTL_ACTION_BLOCK,
+ CTL_ACTION_OVERLAP,
+ CTL_ACTION_OVERLAP_TAG
} ctl_action;
/*
diff --git a/sys/cam/ctl/ctl_ser_table.c b/sys/cam/ctl/ctl_ser_table.c
index 2793bfa1e281..be9ca6b34631 100644
--- a/sys/cam/ctl/ctl_ser_table.c
+++ b/sys/cam/ctl/ctl_ser_table.c
@@ -43,11 +43,8 @@
/* TABLE ctlSerTbl */
/* */
/* The matrix which drives the serialization algorithm. The major index */
-/* (the first) into this table is the command being checked and the minor */
-/* index is the command against which the first command is being checked. */
-/* i.e., the major index (row) command is ahead of the minor index command */
-/* (column) in the queue. This allows the code to optimize by capturing */
-/* the result of the first indexing operation into a pointer. */
+/* (the first, row) into this table is the new command. The minor index */
+/* (column) is the older, possibly already running, command. */
/* */
/* Whenever a new value is added to the IDX_T type, this matrix must be */
/* expanded by one row AND one column -- Because of this, some effort */
@@ -55,30 +52,29 @@
/* */
/****************************************************************************/
-#define sK CTL_SER_SKIP /* Skip */
#define pS CTL_SER_PASS /* Pass */
#define bK CTL_SER_BLOCK /* Blocked */
#define bO CTL_SER_BLOCKOPT /* Optional block */
#define xT CTL_SER_EXTENT /* Extent check */
#define xO CTL_SER_EXTENTOPT /* Optional extent check */
-#define xS CTL_SER_EXTENTSEQ /* Sequential extent check */
+#define xS CTL_SER_SEQ /* Sequential check */
-const static ctl_serialize_action
+const static uint8_t
ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
/**>IDX_ :: 2nd:TUR RD WRT UNM SYN MDSN MDSL RQSN INQ RDCP RES LSNS FMT STR*/
-/*TUR */{ pS, pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*READ */{ pS, xS, xT, bO, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*WRITE */{ pS, xT, xT, bO, bO, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*UNMAP */{ pS, xO, xO, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*SYNC */{ pS, pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*MD_SNS */{ bK, bK, bK, bK, bK, pS, bK, bK, pS, pS, bK, pS, bK, bK},
-/*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*RQ_SNS */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, bK, pS, bK, bK},
-/*INQ */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, bK},
-/*RD_CAP */{ pS, pS, pS, pS, pS, pS, pS, bK, pS, pS, pS, pS, bK, pS},
-/*RES */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK},
-/*LOG_SNS */{ pS, pS, pS, pS, pS, pS, bK, bK, pS, pS, bK, pS, bK, bK},
-/*FORMAT */{ pS, bK, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK, bK},
-/*START */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK, bK},
+/*TUR */{ pS, pS, pS, pS, pS, bK, bK, pS, pS, pS, bK, pS, pS, bK},
+/*READ */{ pS, xS, xT, xO, pS, bK, bK, pS, pS, pS, bK, pS, bK, bK},
+/*WRITE */{ pS, xT, xT, xO, pS, bK, bK, pS, pS, pS, bK, pS, bK, bK},
+/*UNMAP */{ pS, bO, bO, pS, pS, bK, bK, pS, pS, pS, bK, pS, bK, bK},
+/*SYNC */{ pS, pS, bO, pS, pS, bK, bK, pS, pS, pS, bK, pS, bK, bK},
+/*MD_SNS */{ bK, bK, bK, bK, bK, pS, bK, pS, pS, pS, bK, pS, bK, bK},
+/*MD_SEL */{ bK, bK, bK, bK, bK, bK, bK, pS, pS, pS, bK, bK, bK, bK},
+/*RQ_SNS */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, pS, bK},
+/*INQ */{ pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, pS},
+/*RD_CAP */{ pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, bK, pS, bK, bK},
+/*RES */{ bK, bK, bK, bK, bK, bK, bK, bK, pS, pS, bK, bK, bK, bK},
+/*LOG_SNS */{ pS, pS, pS, pS, pS, pS, pS, pS, pS, pS, bK, pS, bK, bK},
+/*FORMAT */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK, bK},
+/*START */{ bK, bK, bK, bK, bK, bK, bK, bK, bK, pS, bK, bK, bK, bK},
};
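To make the new row/column orientation concrete (a standalone toy, with the
action letters and the column order copied from the header comment above;
none of this is CTL code): the row is the new command and the column the
older one, so a new WRITE behind an older READ gets an extent check (xT),
while a new WRITE behind an older START is blocked (bK).

/* Hypothetical toy re-statement of the WRITE row of the matrix above. */
#include <stdio.h>

enum { TUR, READ, WRITE, UNMAP, SYNC, MD_SNS, MD_SEL, RQ_SNS,
       INQ, RD_CAP, RES, LOG_SNS, FORMAT, START, NCMDS };

static const char *write_row[NCMDS] = {
	"pS", "xT", "xT", "xO", "pS", "bK", "bK", "pS",
	"pS", "pS", "bK", "pS", "bK", "bK"
};

int
main(void)
{
	/* New WRITE vs. older READ and older START, per write_row above. */
	printf("WRITE after READ : %s\n", write_row[READ]);	/* xT */
	printf("WRITE after START: %s\n", write_row[START]);	/* bK */
	return (0);
}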