author     Alexander Motin <mav@FreeBSD.org>    2021-01-31 17:46:57 +0000
committer  Alexander Motin <mav@FreeBSD.org>    2021-01-31 17:55:06 +0000
commit     9dc7c250b8bd2d5e669c7633e189a700a02c0571 (patch)
tree       3ca2f1775b77630ce714541df3a30cc6fedc9d45
parent     8eeeee38f4c5b2b48d03f3c5a3fa678962e8c9ed (diff)
cxgb(4): Remove assumption of physically contiguous mbufs.
Investigation of iSCSI target data corruption reports led me to discover that cxgb(4) expects mbufs to be physically contiguous, which is no longer true since I started using m_extaddref() in software iSCSI for large zero-copy transmissions. When memory is fragmented, the driver transmitted garbage from the pages following the first one, because it simply ran pmap_kextract() on the first data pointer instead of doing a proper bus_dmamap_load_mbuf_sg() call. This appears to have been an optimization added many years ago, and at the very least it is wrong in a world of IOMMUs.

This patch removes that optimization and also restricts packet coalescing to mbufs that do not cross a page boundary, since coalescing likewise depends on the one-segment-per-packet assumption.

MFC after:	3 days
Sponsored by:	iXsystems, Inc.
Reviewed by:	mmacy, np
Differential revision:	https://reviews.freebsd.org/D28428
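The new coalescing condition can be read as "the mbuf's data must fit entirely within one page", since only then is a virtually contiguous buffer guaranteed to map to a single physical segment. A minimal illustration follows; the helper name mbuf_within_page() is invented here and is not part of the patch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/*
 * Hypothetical helper (not in the patch): returns non-zero when the
 * mbuf's data does not cross a page boundary, i.e. the offset of the
 * data within its page plus the data length still fits in one page.
 */
static __inline int
mbuf_within_page(struct mbuf *m)
{
	vm_offset_t off = mtod(m, vm_offset_t) & PAGE_MASK;

	return (off + m->m_len <= PAGE_SIZE);
}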
-rw-r--r--	sys/dev/cxgb/cxgb_sge.c		3
-rw-r--r--	sys/dev/cxgb/sys/mvec.h		14
-rw-r--r--	sys/dev/cxgb/sys/uipc_mvec.c	24
3 files changed, 5 insertions, 36 deletions
diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c
index 7f456ccff4ca..491d1a751f4a 100644
--- a/sys/dev/cxgb/cxgb_sge.c
+++ b/sys/dev/cxgb/cxgb_sge.c
@@ -322,7 +322,8 @@ coalesce_check(struct mbuf *m, void *arg)
int *nbytes = &ci->nbytes;
if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
- (*count < 7) && (m->m_next == NULL))) {
+ (*count < 7) && (m->m_next == NULL) &&
+ ((mtod(m, vm_offset_t) & PAGE_MASK) + m->m_len <= PAGE_SIZE))) {
*count += 1;
*nbytes += m->m_len;
return (1);
diff --git a/sys/dev/cxgb/sys/mvec.h b/sys/dev/cxgb/sys/mvec.h
index bdd0b55c5489..4989bff29ec4 100644
--- a/sys/dev/cxgb/sys/mvec.h
+++ b/sys/dev/cxgb/sys/mvec.h
@@ -33,20 +33,6 @@
#define _MVEC_H_
#include <machine/bus.h>
-static __inline void
-busdma_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
- struct mbuf *m, bus_dma_segment_t *seg)
-{
-#if defined(__i386__) || defined(__amd64__)
- seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
- seg->ds_len = m->m_len;
-#else
- int nsegstmp;
-
- bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
-#endif
-}
-
int busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf **m, bus_dma_segment_t *segs, int *nsegs);
void busdma_map_sg_vec(bus_dma_tag_t tag, bus_dmamap_t map,
diff --git a/sys/dev/cxgb/sys/uipc_mvec.c b/sys/dev/cxgb/sys/uipc_mvec.c
index ca31cf1897c7..02f437079468 100644
--- a/sys/dev/cxgb/sys/uipc_mvec.c
+++ b/sys/dev/cxgb/sys/uipc_mvec.c
@@ -65,26 +65,7 @@ busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
retry:
psegs = segs;
seg_count = 0;
- if (n->m_next == NULL) {
- busdma_map_mbuf_fast(tag, map, n, segs);
- *nsegs = 1;
- return (0);
- }
-#if defined(__i386__) || defined(__amd64__)
- while (n && seg_count < TX_MAX_SEGS) {
- /*
- * firmware doesn't like empty segments
- */
- if (__predict_true(n->m_len != 0)) {
- seg_count++;
- busdma_map_mbuf_fast(tag, map, n, psegs);
- psegs++;
- }
- n = n->m_next;
- }
-#else
err = bus_dmamap_load_mbuf_sg(tag, map, *m, segs, &seg_count, 0);
-#endif
if (seg_count == 0) {
if (cxgb_debug)
printf("empty segment chain\n");
@@ -117,8 +98,9 @@ void
busdma_map_sg_vec(bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf *m, bus_dma_segment_t *segs, int *nsegs)
{
+ int n = 0;
- for (*nsegs = 0; m != NULL ; segs++, *nsegs += 1, m = m->m_nextpkt)
- busdma_map_mbuf_fast(tag, map, m, segs);
+ for (*nsegs = 0; m != NULL; segs += n, *nsegs += n, m = m->m_nextpkt)
+ bus_dmamap_load_mbuf_sg(tag, map, m, segs, &n, 0);
}
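
For context, here is a minimal sketch of mapping an mbuf chain with bus_dmamap_load_mbuf_sg(); the function name example_map_mbuf() and the EFBIG/m_collapse() retry policy are illustrative assumptions, not code from this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

/*
 * Illustrative only: bus_dmamap_load_mbuf_sg() fills 'segs' with one
 * entry per physically contiguous piece of the chain.  EFBIG means the
 * chain needs more segments than the DMA tag allows, so collapse the
 * chain and retry once.
 */
static int
example_map_mbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp,
    bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, nsegs, 0);
	if (error == EFBIG) {
		/* Hypothetical fragment limit of 16 for the retry. */
		m = m_collapse(*mp, M_NOWAIT, 16);
		if (m == NULL)
			return (EFBIG);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, nsegs, 0);
	}
	return (error);
}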