author      Alexander Motin <mav@FreeBSD.org>	2014-12-06 20:39:25 +0000
committer   Alexander Motin <mav@FreeBSD.org>	2014-12-06 20:39:25 +0000
commit      bfbfc4a3cbb69406ebcaa041326731048022e434 (patch)
tree        aa8717b9be75a4e2f5efd46cb675324c4d61806b /sys/cam/ctl
parent      d59107f700c226ebf009c823dd4d72c7a23c6eed (diff)
download    src-bfbfc4a3cbb69406ebcaa041326731048022e434.tar.gz
            src-bfbfc4a3cbb69406ebcaa041326731048022e434.zip
Count consecutive read requests as blocking in CTL for files and ZVOLs.
Technically, read requests can be executed in any order or simultaneously, since they do not modify any data. But the ZFS prefetcher goes crazy when it receives consecutive requests from different threads: since it works at the level of individual blocks, instead of two consecutive 128K requests it may see 32 8K requests in mixed order. This patch is more of a workaround than a real fix, and it does not solve all of the prefetcher's problems, but it improves sequential read speed by 3-4x in some configurations. On the other hand, it may hurt performance when the backing store has no prefetcher of its own, which is why it is disabled by default for raw devices.
MFC after:	2 weeks
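To make the new rule concrete, below is a small, self-contained sketch of the adjacency test that CTL_SER_EXTENTSEQ applies between an in-flight read and a pending read. It is not part of the patch; the function name and the sample block counts are made up for illustration, and it only mirrors the lba1 + len1 == lba2 check added in ctl_extent_check_seq(). Per LUN, the behaviour is governed by the new "serseq" option (off/read/on) parsed in ctl_alloc_lun(); file- and ZVOL-backed LUNs default to serializing consecutive reads, raw devices do not.

/*
 * Illustrative sketch only -- not part of the patch.  It mirrors the
 * lba1 + len1 == lba2 adjacency test from ctl_extent_check_seq() with
 * simplified stand-in arguments instead of the real union ctl_io plumbing.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * The earlier (in-flight) read covers [lba1, lba1 + len1); the pending read
 * starts at lba2.  Block the pending read only when the two are exactly
 * consecutive, so sequential reads reach the backing store in order.
 */
static bool
pending_read_blocks(uint64_t lba1, uint64_t len1, uint64_t lba2)
{
	return (lba1 + len1 == lba2);
}

int
main(void)
{
	/* Two back-to-back 128K reads on a 512-byte-sector LUN: 256 blocks each. */
	uint64_t first_lba = 0, first_len = 256, second_lba = 256;

	printf("%s\n", pending_read_blocks(first_lba, first_len, second_lba) ?
	    "CTL_ACTION_BLOCK: queue behind the earlier read" :
	    "CTL_ACTION_PASS: the reads may run concurrently");
	return (0);
}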
Notes: svn path=/head/; revision=275568
Diffstat (limited to 'sys/cam/ctl')
-rw-r--r--	sys/cam/ctl/ctl.c			61
-rw-r--r--	sys/cam/ctl/ctl_backend.h		 3
-rw-r--r--	sys/cam/ctl/ctl_backend_block.c		 2
-rw-r--r--	sys/cam/ctl/ctl_private.h		 8
-rw-r--r--	sys/cam/ctl/ctl_ser_table.c		 3
5 files changed, 62 insertions, 15 deletions
diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 9b156faf3b4b..2a8aba908a2e 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -433,7 +433,9 @@ static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
-static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
+static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
+ bool seq);
+static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
@@ -4590,6 +4592,17 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (value != NULL && strcmp(value, "on") == 0)
lun->flags |= CTL_LUN_READONLY;
+ lun->serseq = CTL_LUN_SERSEQ_OFF;
+ if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ)
+ lun->serseq = CTL_LUN_SERSEQ_READ;
+ value = ctl_get_opt(&be_lun->options, "serseq");
+ if (value != NULL && strcmp(value, "on") == 0)
+ lun->serseq = CTL_LUN_SERSEQ_ON;
+ else if (value != NULL && strcmp(value, "read") == 0)
+ lun->serseq = CTL_LUN_SERSEQ_READ;
+ else if (value != NULL && strcmp(value, "off") == 0)
+ lun->serseq = CTL_LUN_SERSEQ_OFF;
+
lun->ctl_softc = ctl_softc;
TAILQ_INIT(&lun->ooa_queue);
TAILQ_INIT(&lun->blocked_queue);
@@ -10752,15 +10765,15 @@ ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
}
static ctl_action
-ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2)
+ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
+ bool seq)
{
uint64_t endlba1, endlba2;
- endlba1 = lba1 + len1 - 1;
+ endlba1 = lba1 + len1 - (seq ? 0 : 1);
endlba2 = lba2 + len2 - 1;
- if ((endlba1 < lba2)
- || (endlba2 < lba1))
+ if ((endlba1 < lba2) || (endlba2 < lba1))
return (CTL_ACTION_PASS);
else
return (CTL_ACTION_BLOCK);
@@ -10799,23 +10812,39 @@ ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
}
static ctl_action
-ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
+ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
uint64_t lba1, lba2;
uint64_t len1, len2;
int retval;
- if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
+ if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
return (CTL_ACTION_ERROR);
- retval = ctl_extent_check_unmap(io2, lba1, len1);
+ retval = ctl_extent_check_unmap(io1, lba2, len2);
if (retval != CTL_ACTION_ERROR)
return (retval);
+ if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
+ return (CTL_ACTION_ERROR);
+
+ return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
+}
+
+static ctl_action
+ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
+{
+ uint64_t lba1, lba2;
+ uint64_t len1, len2;
+
+ if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
+ return (CTL_ACTION_ERROR);
if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
return (CTL_ACTION_ERROR);
- return (ctl_extent_check_lba(lba1, len1, lba2, len2));
+ if (lba1 + len1 == lba2)
+ return (CTL_ACTION_BLOCK);
+ return (CTL_ACTION_PASS);
}
static ctl_action
@@ -10904,12 +10933,18 @@ ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
case CTL_SER_BLOCK:
return (CTL_ACTION_BLOCK);
case CTL_SER_EXTENT:
- return (ctl_extent_check(pending_io, ooa_io));
+ return (ctl_extent_check(ooa_io, pending_io,
+ (lun->serseq == CTL_LUN_SERSEQ_ON)));
case CTL_SER_EXTENTOPT:
if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
& SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
- return (ctl_extent_check(pending_io, ooa_io));
- /* FALLTHROUGH */
+ return (ctl_extent_check(ooa_io, pending_io,
+ (lun->serseq == CTL_LUN_SERSEQ_ON)));
+ return (CTL_ACTION_PASS);
+ case CTL_SER_EXTENTSEQ:
+ if (lun->serseq != CTL_LUN_SERSEQ_OFF)
+ return (ctl_extent_check_seq(ooa_io, pending_io));
+ return (CTL_ACTION_PASS);
case CTL_SER_PASS:
return (CTL_ACTION_PASS);
case CTL_SER_BLOCKOPT:
@@ -12440,7 +12475,7 @@ ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
return (CTL_LUN_PAT_NONE);
action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
- desc->lba_range.len);
+ desc->lba_range.len, FALSE);
/*
* A "pass" means that the LBA ranges don't overlap, so
* this doesn't match the user's range criteria.
diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h
index f32d1209bb93..77975f9ffea0 100644
--- a/sys/cam/ctl/ctl_backend.h
+++ b/sys/cam/ctl/ctl_backend.h
@@ -85,7 +85,8 @@ typedef enum {
CTL_LUN_FLAG_DEVID = 0x20,
CTL_LUN_FLAG_DEV_TYPE = 0x40,
CTL_LUN_FLAG_UNMAP = 0x80,
- CTL_LUN_FLAG_OFFLINE = 0x100
+ CTL_LUN_FLAG_OFFLINE = 0x100,
+ CTL_LUN_FLAG_SERSEQ_READ = 0x200
} ctl_backend_lun_flags;
#ifdef _KERNEL
diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c
index ef9efab26083..1eb5ed2e40ba 100644
--- a/sys/cam/ctl/ctl_backend_block.c
+++ b/sys/cam/ctl/ctl_backend_block.c
@@ -2204,6 +2204,8 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_OFFLINE;
if (unmap)
be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
+ if (be_lun->dispatch != ctl_be_block_dispatch_dev)
+ be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_SERSEQ_READ;
be_lun->ctl_be_lun.be_lun = be_lun;
be_lun->ctl_be_lun.maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1);
diff --git a/sys/cam/ctl/ctl_private.h b/sys/cam/ctl/ctl_private.h
index 351b7f859e59..7bedf103b4e6 100644
--- a/sys/cam/ctl/ctl_private.h
+++ b/sys/cam/ctl/ctl_private.h
@@ -97,6 +97,7 @@ typedef enum {
CTL_SER_BLOCKOPT,
CTL_SER_EXTENT,
CTL_SER_EXTENTOPT,
+ CTL_SER_EXTENTSEQ,
CTL_SER_PASS,
CTL_SER_SKIP
} ctl_serialize_action;
@@ -183,6 +184,12 @@ typedef enum {
} ctl_lun_flags;
typedef enum {
+ CTL_LUN_SERSEQ_OFF,
+ CTL_LUN_SERSEQ_READ,
+ CTL_LUN_SERSEQ_ON
+} ctl_lun_serseq;
+
+typedef enum {
CTLBLOCK_FLAG_NONE = 0x00,
CTLBLOCK_FLAG_INVALID = 0x01
} ctlblock_flags;
@@ -386,6 +393,7 @@ struct ctl_lun {
struct ctl_id target;
uint64_t lun;
ctl_lun_flags flags;
+ ctl_lun_serseq serseq;
STAILQ_HEAD(,ctl_error_desc) error_list;
uint64_t error_serial;
struct ctl_softc *ctl_softc;
diff --git a/sys/cam/ctl/ctl_ser_table.c b/sys/cam/ctl/ctl_ser_table.c
index d3693ce9c194..ee2d019b528a 100644
--- a/sys/cam/ctl/ctl_ser_table.c
+++ b/sys/cam/ctl/ctl_ser_table.c
@@ -59,12 +59,13 @@
#define bO CTL_SER_BLOCKOPT /* Optional block */
#define xT CTL_SER_EXTENT /* Extent check */
#define xO CTL_SER_EXTENTOPT /* Optional extent check */
+#define xS CTL_SER_EXTENTSEQ /* Sequential extent check */
static ctl_serialize_action
ctl_serialize_table[CTL_SERIDX_COUNT][CTL_SERIDX_COUNT] = {
/**>IDX_ :: 2nd:TUR RD WRT UNM MDSN MDSL RQSN INQ RDCP RES LSNS FMT STR*/
/*TUR */{ pS, pS, pS, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
-/*READ */{ pS, pS, xT, bO, bK, bK, bK, pS, pS, bK, pS, bK, bK},
+/*READ */{ pS, xS, xT, bO, bK, bK, bK, pS, pS, bK, pS, bK, bK},
/*WRITE */{ pS, xT, xT, bO, bK, bK, bK, pS, pS, bK, pS, bK, bK},
/*UNMAP */{ pS, xO, xO, pS, bK, bK, bK, pS, pS, bK, pS, bK, bK},
/*MD_SNS */{ bK, bK, bK, bK, pS, bK, bK, pS, pS, bK, pS, bK, bK},