aboutsummaryrefslogtreecommitdiff
path: root/sys/cam/ctl/ctl_frontend_ioctl.c
diff options
context:
space:
mode:
authorAlexander Motin <mav@FreeBSD.org>2017-01-16 16:19:55 +0000
committerAlexander Motin <mav@FreeBSD.org>2017-01-16 16:19:55 +0000
commiteb6ac6f9db762bf753e5b6837279f992eb8a5800 (patch)
tree503e9ffd3dd4c74fd5e968f0a0143cb0320e55b2 /sys/cam/ctl/ctl_frontend_ioctl.c
parent662e30fca3530b5dba603d97aeedcc3fbc81ee97 (diff)
downloadsrc-eb6ac6f9db762bf753e5b6837279f992eb8a5800.tar.gz
src-eb6ac6f9db762bf753e5b6837279f992eb8a5800.zip
Make CTL frontends report kern_data_resid for under-/overruns.
It seems that kern_data_resid was never really implemented. This change finally does it. Frontends now update this field while transferring data, so that CTL and the backends receiving it can handle the result more flexibly. At this point behavior should not change significantly — errors are still reported on write overrun — but that may be changed later if we decide so. The CAM target frontend still does not properly handle overruns due to CAM API limitations. We may need to add some fields to struct ccb_accept_tio to pass information about the initiator's requested transfer size(s). MFC after: 2 weeks
Notes
Notes: svn path=/head/; revision=312291
Diffstat (limited to 'sys/cam/ctl/ctl_frontend_ioctl.c')
-rw-r--r--sys/cam/ctl/ctl_frontend_ioctl.c25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c
index 6f90fe3a42bc..3f5c23c0c55e 100644
--- a/sys/cam/ctl/ctl_frontend_ioctl.c
+++ b/sys/cam/ctl/ctl_frontend_ioctl.c
@@ -138,7 +138,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
struct ctl_sg_entry ext_entry, kern_entry;
int ext_sglen, ext_sg_entries, kern_sg_entries;
int ext_sg_start, ext_offset;
- int len_to_copy, len_copied;
+ int len_to_copy;
int kern_watermark, ext_watermark;
int ext_sglist_malloced;
int i, j;
@@ -150,7 +150,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
*/
if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
ext_sglist_malloced = 0;
- ctsio->ext_data_filled = ctsio->ext_data_len;
+ ctsio->ext_data_filled += ctsio->kern_data_len;
+ ctsio->kern_data_resid = 0;
goto bailout;
}
@@ -204,7 +205,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_watermark = 0;
ext_watermark = ext_offset;
- len_copied = 0;
for (i = ext_sg_start, j = 0;
i < ext_sg_entries && j < kern_sg_entries;) {
uint8_t *ext_ptr, *kern_ptr;
@@ -226,9 +226,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
kern_ptr = (uint8_t *)kern_sglist[j].addr;
kern_ptr = kern_ptr + kern_watermark;
- kern_watermark += len_to_copy;
- ext_watermark += len_to_copy;
-
if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
CTL_FLAG_DATA_IN) {
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
@@ -250,21 +247,22 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
}
}
- len_copied += len_to_copy;
+ ctsio->ext_data_filled += len_to_copy;
+ ctsio->kern_data_resid -= len_to_copy;
+ ext_watermark += len_to_copy;
if (ext_sglist[i].len == ext_watermark) {
i++;
ext_watermark = 0;
}
+ kern_watermark += len_to_copy;
if (kern_sglist[j].len == kern_watermark) {
j++;
kern_watermark = 0;
}
}
- ctsio->ext_data_filled += len_copied;
-
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
"kern_sg_entries: %d\n", ext_sg_entries,
kern_sg_entries));
@@ -272,15 +270,6 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
"kern_data_len = %d\n", ctsio->ext_data_len,
ctsio->kern_data_len));
- /*
- * Report write underflow as error, since CTL and backends don't
- * really support it.
- */
- if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
- j < kern_sg_entries) {
- ctsio->io_hdr.port_status = 43;
- }
-
bailout:
if (ext_sglist_malloced != 0)
free(ext_sglist, M_CTL);