author    Gleb Smirnoff <glebius@FreeBSD.org>  2020-05-03 00:12:56 +0000
committer Gleb Smirnoff <glebius@FreeBSD.org>  2020-05-03 00:12:56 +0000
commit    7b6c99d08d57bd6aeee333734b801208914fd788
tree      d170efb569044b2f93ad9efe75a46b771ff6f3a9 /sys/kern
parent    bccf6e26e9ea8fe9f4cccc18c761f84c6c095045
Step 3: anonymize struct mbuf_ext_pgs and move all its fields into mbuf
within m_epg namespace.

All edits except the 'struct mbuf' declaration and mb_dupcl() were done
mechanically with sed:

s/->m_ext_pgs.npgs/->m_epg_npgs/g
s/->m_ext_pgs.nrdy/->m_epg_nrdy/g
s/->m_ext_pgs.hdr_len/->m_epg_hdrlen/g
s/->m_ext_pgs.trail_len/->m_epg_trllen/g
s/->m_ext_pgs.first_pg_off/->m_epg_1st_off/g
s/->m_ext_pgs.last_pg_len/->m_epg_last_len/g
s/->m_ext_pgs.flags/->m_epg_flags/g
s/->m_ext_pgs.record_type/->m_epg_record_type/g
s/->m_ext_pgs.enc_cnt/->m_epg_enc_cnt/g
s/->m_ext_pgs.tls/->m_epg_tls/g
s/->m_ext_pgs.so/->m_epg_so/g
s/->m_ext_pgs.seqno/->m_epg_seqno/g
s/->m_ext_pgs.stailq/->m_epg_stailq/g

Reviewed by:		gallatin
Differential Revision:	https://reviews.freebsd.org/D24598
Notes:
    svn path=/head/; revision=360579
Diffstat (limited to 'sys/kern')
 sys/kern/kern_mbuf.c     | 42
 sys/kern/kern_sendfile.c | 14
 sys/kern/subr_bus_dma.c  | 16
 sys/kern/subr_sglist.c   | 28
 sys/kern/uipc_ktls.c     | 70
 sys/kern/uipc_mbuf.c     | 38
 sys/kern/uipc_sockbuf.c  | 16
 7 files changed, 112 insertions(+), 112 deletions(-)
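As the commit message notes, the 'struct mbuf' declaration itself was edited by
hand; it lives in sys/sys/mbuf.h and therefore does not appear in this
sys/kern-only diff. For orientation while reading the hunks below, here is a
minimal sketch of the anonymized field block the renamed accessors refer to.
The struct name is hypothetical and the field types are guesses inferred from
how this diff uses them (page counts fit in small integers, m_epg_tls and
m_epg_so are session/socket pointers, m_epg_stailq is the ktls work-queue
linkage); the authoritative layout is the real declaration in sys/sys/mbuf.h.

/*
 * Sketch only: the former struct mbuf_ext_pgs fields, now embedded
 * directly in struct mbuf under the m_epg_* namespace.  Types here
 * are assumptions inferred from this diff, not the real declaration.
 */
#include <stdint.h>
#include <sys/queue.h>

struct ktls_session;			/* opaque for this sketch */
struct socket;				/* opaque for this sketch */
struct mbuf;				/* opaque for this sketch */

struct m_epg_sketch {			/* hypothetical name */
	uint8_t	 m_epg_npgs;		/* # of valid pages */
	uint8_t	 m_epg_nrdy;		/* # of pages with pending I/O */
	uint8_t	 m_epg_hdrlen;		/* TLS header length */
	uint8_t	 m_epg_trllen;		/* TLS trailer length */
	uint16_t m_epg_1st_off;		/* offset into the first page */
	uint16_t m_epg_last_len;	/* bytes used in the last page */
	uint8_t	 m_epg_flags;		/* EPG_FLAG_2FREE, EPG_FLAG_ANON */
	uint8_t	 m_epg_record_type;	/* saved TLS record type */
	int	 m_epg_enc_cnt;		/* pages queued for encryption */
	struct ktls_session *m_epg_tls;	/* TLS session */
	struct socket *m_epg_so;	/* socket, saved by ktls_enqueue() */
	uint64_t m_epg_seqno;		/* TLS sequence number */
	STAILQ_ENTRY(mbuf) m_epg_stailq; /* ktls worker queue linkage */
};

The mb_dupcl() hunk in uipc_mbuf.c copies this block wholesale with
__rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy), where the two markers
presumably alias the first and last field in the copied range; the embedded
page, header, and trailer arrays (m_epg_pa[], m_epg_hdr[], m_epg_trail[]) used
as unchanged context throughout the hunks belong to the same namespace but were
renamed in an earlier step of this series.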
diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
index 0cfbb2f761c3..1f323c7a03c1 100644
--- a/sys/kern/kern_mbuf.c
+++ b/sys/kern/kern_mbuf.c
@@ -833,8 +833,8 @@ mb_free_notready(struct mbuf *m, int count)
for (i = 0; i < count && m != NULL; i++) {
if ((m->m_flags & M_EXT) != 0 &&
m->m_ext.ext_type == EXT_PGS) {
- m->m_ext_pgs.nrdy--;
- if (m->m_ext_pgs.nrdy != 0)
+ m->m_epg_nrdy--;
+ if (m->m_epg_nrdy != 0)
continue;
}
m = m_free(m);
@@ -943,7 +943,7 @@ _mb_unmapped_to_ext(struct mbuf *m)
MBUF_EXT_PGS_ASSERT(m);
len = m->m_len;
- KASSERT(m->m_ext_pgs.tls == NULL, ("%s: can't convert TLS mbuf %p",
+ KASSERT(m->m_epg_tls == NULL, ("%s: can't convert TLS mbuf %p",
__func__, m));
/* See if this is the mbuf that holds the embedded refcount. */
@@ -961,11 +961,11 @@ _mb_unmapped_to_ext(struct mbuf *m)
off = mtod(m, vm_offset_t);
top = NULL;
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -979,8 +979,8 @@ _mb_unmapped_to_ext(struct mbuf *m)
seglen);
}
}
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1016,9 +1016,9 @@ _mb_unmapped_to_ext(struct mbuf *m)
pgoff = 0;
};
if (len != 0) {
- KASSERT((off + len) <= m->m_ext_pgs.trail_len,
+ KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d)", off, len,
- m->m_ext_pgs.trail_len));
+ m->m_epg_trllen));
m_new = m_get(M_NOWAIT, MT_DATA);
if (m_new == NULL)
goto fail;
@@ -1122,15 +1122,15 @@ mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
if (m == NULL)
return (NULL);
- m->m_ext_pgs.npgs = 0;
- m->m_ext_pgs.nrdy = 0;
- m->m_ext_pgs.first_pg_off = 0;
- m->m_ext_pgs.last_pg_len = 0;
- m->m_ext_pgs.flags = 0;
- m->m_ext_pgs.hdr_len = 0;
- m->m_ext_pgs.trail_len = 0;
- m->m_ext_pgs.tls = NULL;
- m->m_ext_pgs.so = NULL;
+ m->m_epg_npgs = 0;
+ m->m_epg_nrdy = 0;
+ m->m_epg_1st_off = 0;
+ m->m_epg_last_len = 0;
+ m->m_epg_flags = 0;
+ m->m_epg_hdrlen = 0;
+ m->m_epg_trllen = 0;
+ m->m_epg_tls = NULL;
+ m->m_epg_so = NULL;
m->m_data = NULL;
m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
m->m_ext.ext_type = EXT_PGS;
@@ -1215,7 +1215,7 @@ mb_free_ext(struct mbuf *m)
("%s: ext_free not set", __func__));
mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
- tls = mref->m_ext_pgs.tls;
+ tls = mref->m_epg_tls;
if (tls != NULL &&
!refcount_release_if_not_last(&tls->refcount))
ktls_enqueue_to_free(mref);
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
index 305cdd48304f..d6492d47f40b 100644
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -198,8 +198,8 @@ sendfile_free_mext_pg(struct mbuf *m)
cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
- for (i = 0; i < m->m_ext_pgs.npgs; i++) {
- if (cache_last && i == m->m_ext_pgs.npgs - 1)
+ for (i = 0; i < m->m_epg_npgs; i++) {
+ if (cache_last && i == m->m_epg_npgs - 1)
flags = 0;
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_release(pg, flags);
@@ -365,7 +365,7 @@ sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
#if defined(KERN_TLS) && defined(INVARIANTS)
if ((sfio->m->m_flags & M_EXT) != 0 &&
sfio->m->m_ext.ext_type == EXT_PGS)
- KASSERT(sfio->tls == sfio->m->m_ext_pgs.tls,
+ KASSERT(sfio->tls == sfio->m->m_epg_tls,
("TLS session mismatch"));
else
KASSERT(sfio->tls == NULL,
@@ -1034,18 +1034,18 @@ retry_space:
else
m = m0;
mtail = m0;
- m0->m_ext_pgs.first_pg_off =
+ m0->m_epg_1st_off =
vmoff(i, off) & PAGE_MASK;
}
if (nios) {
mtail->m_flags |= M_NOTREADY;
- m0->m_ext_pgs.nrdy++;
+ m0->m_epg_nrdy++;
}
m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
- m0->m_ext_pgs.npgs++;
+ m0->m_epg_npgs++;
xfs = xfsize(i, npages, off, space);
- m0->m_ext_pgs.last_pg_len = xfs;
+ m0->m_epg_last_len = xfs;
MBUF_EXT_PGS_ASSERT_SANITY(m0);
mtail->m_len += xfs;
mtail->m_ext.ext_size += PAGE_SIZE;
diff --git a/sys/kern/subr_bus_dma.c b/sys/kern/subr_bus_dma.c
index 453725f97c9c..00bb95b1d664 100644
--- a/sys/kern/subr_bus_dma.c
+++ b/sys/kern/subr_bus_dma.c
@@ -129,11 +129,11 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
/* Skip over any data removed from the front. */
off = mtod(m, vm_offset_t);
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -143,8 +143,8 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
flags, segs, nsegs);
}
}
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -161,9 +161,9 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
pgoff = 0;
};
if (len != 0 && error == 0) {
- KASSERT((off + len) <= m->m_ext_pgs.trail_len,
+ KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d)", off, len,
- m->m_ext_pgs.trail_len));
+ m->m_epg_trllen));
error = _bus_dmamap_load_buffer(dmat, map,
&m->m_epg_trail[off], len, kernel_pmap, flags, segs,
nsegs);
diff --git a/sys/kern/subr_sglist.c b/sys/kern/subr_sglist.c
index d9ed39c645ee..e83b4db515d7 100644
--- a/sys/kern/subr_sglist.c
+++ b/sys/kern/subr_sglist.c
@@ -233,11 +233,11 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
return (0);
nsegs = 0;
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = MIN(seglen, len);
off = 0;
@@ -247,8 +247,8 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
}
}
nextaddr = 0;
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -267,7 +267,7 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
pgoff = 0;
};
if (len != 0) {
- seglen = MIN(len, m->m_ext_pgs.trail_len - off);
+ seglen = MIN(len, m->m_epg_trllen - off);
len -= seglen;
nsegs += sglist_count(&m->m_epg_trail[off], seglen);
}
@@ -391,11 +391,11 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
MBUF_EXT_PGS_ASSERT(m);
error = 0;
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = MIN(seglen, len);
off = 0;
@@ -404,8 +404,8 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
&m->m_epg_hdr[segoff], seglen);
}
}
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -422,7 +422,7 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
pgoff = 0;
};
if (error == 0 && len > 0) {
- seglen = MIN(len, m->m_ext_pgs.trail_len - off);
+ seglen = MIN(len, m->m_epg_trllen - off);
len -= seglen;
error = sglist_append(sg,
&m->m_epg_trail[off], seglen);
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
index e8e5c5c24aa9..3cf1befaba32 100644
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -1292,7 +1292,7 @@ ktls_seq(struct sockbuf *sb, struct mbuf *m)
KASSERT((m->m_flags & M_NOMAP) != 0,
("ktls_seq: mapped mbuf %p", m));
- m->m_ext_pgs.seqno = sb->sb_tls_seqno;
+ m->m_epg_seqno = sb->sb_tls_seqno;
sb->sb_tls_seqno++;
}
}
@@ -1340,10 +1340,10 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
tls_len = m->m_len;
/* Save a reference to the session. */
- m->m_ext_pgs.tls = ktls_hold(tls);
+ m->m_epg_tls = ktls_hold(tls);
- m->m_ext_pgs.hdr_len = tls->params.tls_hlen;
- m->m_ext_pgs.trail_len = tls->params.tls_tlen;
+ m->m_epg_hdrlen = tls->params.tls_hlen;
+ m->m_epg_trllen = tls->params.tls_tlen;
if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
int bs, delta;
@@ -1365,9 +1365,9 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
bs = tls->params.tls_bs;
delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
- m->m_ext_pgs.trail_len -= delta;
+ m->m_epg_trllen -= delta;
}
- m->m_len += m->m_ext_pgs.hdr_len + m->m_ext_pgs.trail_len;
+ m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;
/* Populate the TLS header. */
tlshdr = (void *)m->m_epg_hdr;
@@ -1382,7 +1382,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
tlshdr->tls_type = TLS_RLTYPE_APP;
/* save the real record type for later */
- m->m_ext_pgs.record_type = record_type;
+ m->m_epg_record_type = record_type;
m->m_epg_trail[0] = record_type;
} else {
tlshdr->tls_vminor = tls->params.tls_vminor;
@@ -1419,8 +1419,8 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
if (tls->mode == TCP_TLS_MODE_SW) {
m->m_flags |= M_NOTREADY;
- m->m_ext_pgs.nrdy = m->m_ext_pgs.npgs;
- *enq_cnt += m->m_ext_pgs.npgs;
+ m->m_epg_nrdy = m->m_epg_npgs;
+ *enq_cnt += m->m_epg_npgs;
}
}
}
@@ -1432,10 +1432,10 @@ ktls_enqueue_to_free(struct mbuf *m)
bool running;
/* Mark it for freeing. */
- m->m_ext_pgs.flags |= EPG_FLAG_2FREE;
- wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
+ m->m_epg_flags |= EPG_FLAG_2FREE;
+ wq = &ktls_wq[m->m_epg_tls->wq_index];
mtx_lock(&wq->mtx);
- STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
+ STAILQ_INSERT_TAIL(&wq->head, m, m_epg_stailq);
running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
@@ -1453,19 +1453,19 @@ ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
("ktls_enqueue: %p not unready & nomap mbuf\n", m));
KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));
- KASSERT(m->m_ext_pgs.tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
+ KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
- m->m_ext_pgs.enc_cnt = page_count;
+ m->m_epg_enc_cnt = page_count;
/*
* Save a pointer to the socket. The caller is responsible
* for taking an additional reference via soref().
*/
- m->m_ext_pgs.so = so;
+ m->m_epg_so = so;
- wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
+ wq = &ktls_wq[m->m_epg_tls->wq_index];
mtx_lock(&wq->mtx);
- STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
+ STAILQ_INSERT_TAIL(&wq->head, m, m_epg_stailq);
running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
@@ -1486,14 +1486,14 @@ ktls_encrypt(struct mbuf *top)
int error, i, len, npages, off, total_pages;
bool is_anon;
- so = top->m_ext_pgs.so;
- tls = top->m_ext_pgs.tls;
+ so = top->m_epg_so;
+ tls = top->m_epg_tls;
KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
- top->m_ext_pgs.so = NULL;
+ top->m_epg_so = NULL;
#endif
- total_pages = top->m_ext_pgs.enc_cnt;
+ total_pages = top->m_epg_enc_cnt;
npages = 0;
/*
@@ -1515,13 +1515,13 @@ ktls_encrypt(struct mbuf *top)
*/
error = 0;
for (m = top; npages != total_pages; m = m->m_next) {
- KASSERT(m->m_ext_pgs.tls == tls,
+ KASSERT(m->m_epg_tls == tls,
("different TLS sessions in a single mbuf chain: %p vs %p",
- tls, m->m_ext_pgs.tls));
+ tls, m->m_epg_tls));
KASSERT((m->m_flags & (M_NOMAP | M_NOTREADY)) ==
(M_NOMAP | M_NOTREADY),
("%p not unready & nomap mbuf (top = %p)\n", m, top));
- KASSERT(npages + m->m_ext_pgs.npgs <= total_pages,
+ KASSERT(npages + m->m_epg_npgs <= total_pages,
("page count mismatch: top %p, total_pages %d, m %p", top,
total_pages, m));
@@ -1533,10 +1533,10 @@ ktls_encrypt(struct mbuf *top)
* (from sendfile), anonymous wired pages are
* allocated and assigned to the destination iovec.
*/
- is_anon = (m->m_ext_pgs.flags & EPG_FLAG_ANON) != 0;
+ is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;
- off = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs; i++, off = 0) {
+ off = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
len = m_epg_pagelen(m, i, off);
src_iov[i].iov_len = len;
src_iov[i].iov_base =
@@ -1565,8 +1565,8 @@ retry_page:
error = (*tls->sw_encrypt)(tls,
(const struct tls_record_layer *)m->m_epg_hdr,
- m->m_epg_trail, src_iov, dst_iov, i, m->m_ext_pgs.seqno,
- m->m_ext_pgs.record_type);
+ m->m_epg_trail, src_iov, dst_iov, i, m->m_epg_seqno,
+ m->m_epg_record_type);
if (error) {
counter_u64_add(ktls_offload_failed_crypto, 1);
break;
@@ -1582,14 +1582,14 @@ retry_page:
m->m_ext.ext_free(m);
/* Replace them with the new pages. */
- for (i = 0; i < m->m_ext_pgs.npgs; i++)
+ for (i = 0; i < m->m_epg_npgs; i++)
m->m_epg_pa[i] = parray[i];
/* Use the basic free routine. */
m->m_ext.ext_free = mb_free_mext_pgs;
/* Pages are now writable. */
- m->m_ext_pgs.flags |= EPG_FLAG_ANON;
+ m->m_epg_flags |= EPG_FLAG_ANON;
}
/*
@@ -1599,7 +1599,7 @@ retry_page:
* yet-to-be-encrypted records having an associated
* session.
*/
- m->m_ext_pgs.tls = NULL;
+ m->m_epg_tls = NULL;
ktls_free(tls);
}
@@ -1639,9 +1639,9 @@ ktls_work_thread(void *ctx)
STAILQ_CONCAT(&local_head, &wq->head);
mtx_unlock(&wq->mtx);
- STAILQ_FOREACH_SAFE(m, &local_head, m_ext_pgs.stailq, n) {
- if (m->m_ext_pgs.flags & EPG_FLAG_2FREE) {
- ktls_free(m->m_ext_pgs.tls);
+ STAILQ_FOREACH_SAFE(m, &local_head, m_epg_stailq, n) {
+ if (m->m_epg_flags & EPG_FLAG_2FREE) {
+ ktls_free(m->m_epg_tls);
uma_zfree(zone_mbuf, m);
} else {
ktls_encrypt(m);
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 5ff84d088a84..bd6283742616 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -208,9 +208,9 @@ mb_dupcl(struct mbuf *n, struct mbuf *m)
*/
switch (m->m_ext.ext_type) {
case EXT_PGS:
- bcopy(&m->m_ext, &n->m_ext, m_epg_copylen);
- bcopy(&m->m_ext_pgs, &n->m_ext_pgs,
- sizeof(struct mbuf_ext_pgs));
+ bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
+ __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
+ bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
break;
case EXT_EXTREF:
bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
@@ -1440,10 +1440,10 @@ frags_per_mbuf(struct mbuf *m)
* all the backing physical pages are disjoint.
*/
frags = 0;
- if (m->m_ext_pgs.hdr_len != 0)
+ if (m->m_epg_hdrlen != 0)
frags++;
- frags += m->m_ext_pgs.npgs;
- if (m->m_ext_pgs.trail_len != 0)
+ frags += m->m_epg_npgs;
+ if (m->m_epg_trllen != 0)
frags++;
return (frags);
@@ -1629,7 +1629,7 @@ mb_free_mext_pgs(struct mbuf *m)
vm_page_t pg;
MBUF_EXT_PGS_ASSERT(m);
- for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
+ for (int i = 0; i < m->m_epg_npgs; i++) {
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_unwire_noq(pg);
vm_page_free(pg);
@@ -1672,7 +1672,7 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
else
prev->m_next = mb;
prev = mb;
- mb->m_ext_pgs.flags = EPG_FLAG_ANON;
+ mb->m_epg_flags = EPG_FLAG_ANON;
needed = length = MIN(maxseg, total);
for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
@@ -1687,16 +1687,16 @@ retry_page:
}
pg_array[i]->flags &= ~PG_ZERO;
mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
- mb->m_ext_pgs.npgs++;
+ mb->m_epg_npgs++;
}
- mb->m_ext_pgs.last_pg_len = length - PAGE_SIZE * (mb->m_ext_pgs.npgs - 1);
+ mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
MBUF_EXT_PGS_ASSERT_SANITY(mb);
total -= length;
error = uiomove_fromphys(pg_array, 0, length, uio);
if (error != 0)
goto failed;
mb->m_len = length;
- mb->m_ext.ext_size += PAGE_SIZE * mb->m_ext_pgs.npgs;
+ mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
if (flags & M_PKTHDR)
m->m_pkthdr.len += length;
}
@@ -1782,11 +1782,11 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
off = mtod(m, vm_offset_t);
off += m_off;
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -1795,8 +1795,8 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
&m->m_epg_hdr[segoff]), seglen, uio);
}
}
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1813,9 +1813,9 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
pgoff = 0;
};
if (len != 0 && error == 0) {
- KASSERT((off + len) <= m->m_ext_pgs.trail_len,
+ KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
- m->m_ext_pgs.trail_len, m_off));
+ m->m_epg_trllen, m_off));
error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
len, uio);
}
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
index 53b1f00995db..fc671ecdb069 100644
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -130,16 +130,16 @@ sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
!mbuf_has_tls_session(n)) {
int hdr_len, trail_len;
- hdr_len = n->m_ext_pgs.hdr_len;
- trail_len = m->m_ext_pgs.trail_len;
+ hdr_len = n->m_epg_hdrlen;
+ trail_len = m->m_epg_trllen;
if (trail_len != 0 && hdr_len != 0 &&
trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
/* copy n's header to m's trailer */
memcpy(&m->m_epg_trail[trail_len],
n->m_epg_hdr, hdr_len);
- m->m_ext_pgs.trail_len += hdr_len;
+ m->m_epg_trllen += hdr_len;
m->m_len += hdr_len;
- n->m_ext_pgs.hdr_len = 0;
+ n->m_epg_hdrlen = 0;
n->m_len -= hdr_len;
}
}
@@ -211,13 +211,13 @@ sbready(struct sockbuf *sb, struct mbuf *m0, int count)
("%s: m %p !M_NOTREADY", __func__, m));
if ((m->m_flags & M_EXT) != 0 &&
m->m_ext.ext_type == EXT_PGS) {
- if (count < m->m_ext_pgs.nrdy) {
- m->m_ext_pgs.nrdy -= count;
+ if (count < m->m_epg_nrdy) {
+ m->m_epg_nrdy -= count;
count = 0;
break;
}
- count -= m->m_ext_pgs.nrdy;
- m->m_ext_pgs.nrdy = 0;
+ count -= m->m_epg_nrdy;
+ m->m_epg_nrdy = 0;
} else
count--;