author     Gleb Smirnoff <glebius@FreeBSD.org>    2020-05-03 00:12:56 +0000
committer  Gleb Smirnoff <glebius@FreeBSD.org>    2020-05-03 00:12:56 +0000
commit     7b6c99d08d57bd6aeee333734b801208914fd788 (patch)
tree       d170efb569044b2f93ad9efe75a46b771ff6f3a9 /sys/dev/cxgbe
parent     bccf6e26e9ea8fe9f4cccc18c761f84c6c095045 (diff)
Step 3: anonymize struct mbuf_ext_pgs and move all its fields into mbuf
within m_epg namespace. All edits except the 'struct mbuf' declaration
and mb_dupcl() were done mechanically with sed:

s/->m_ext_pgs.nrdy/->m_epg_nrdy/g
s/->m_ext_pgs.hdr_len/->m_epg_hdrlen/g
s/->m_ext_pgs.trail_len/->m_epg_trllen/g
s/->m_ext_pgs.first_pg_off/->m_epg_1st_off/g
s/->m_ext_pgs.last_pg_len/->m_epg_last_len/g
s/->m_ext_pgs.flags/->m_epg_flags/g
s/->m_ext_pgs.record_type/->m_epg_record_type/g
s/->m_ext_pgs.enc_cnt/->m_epg_enc_cnt/g
s/->m_ext_pgs.tls/->m_epg_tls/g
s/->m_ext_pgs.so/->m_epg_so/g
s/->m_ext_pgs.seqno/->m_epg_seqno/g
s/->m_ext_pgs.stailq/->m_epg_stailq/g

Reviewed by:	gallatin
Differential Revision:	https://reviews.freebsd.org/D24598
Notes:
svn path=/head/; revision=360579
Diffstat (limited to 'sys/dev/cxgbe')
-rw-r--r--  sys/dev/cxgbe/crypto/t4_kern_tls.c  60
-rw-r--r--  sys/dev/cxgbe/t4_sge.c              14
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c       12
-rw-r--r--  sys/dev/cxgbe/tom/t4_tls.c          22
4 files changed, 54 insertions(+), 54 deletions(-)
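
Since the rename is purely textual, it can be replayed with a small
shell loop. The sketch below is illustrative only: it covers just the
substitutions from the commit message that appear in this diff (plus
npgs, which this diff also renames but the quoted list omits), escapes
the '.' that the quoted one-liners leave as a regex wildcard, and
limits itself to the four cxgbe files from the diffstat, whereas the
actual commit ran over the whole tree:

# Replay the mechanical m_ext_pgs -> m_epg_* rename (sketch only).
# BSD sed wants -i '' for in-place edits; GNU sed uses plain -i.
for f in sys/dev/cxgbe/crypto/t4_kern_tls.c \
    sys/dev/cxgbe/t4_sge.c \
    sys/dev/cxgbe/tom/t4_cpl_io.c \
    sys/dev/cxgbe/tom/t4_tls.c; do
	sed -i '' \
	    -e 's/->m_ext_pgs\.hdr_len/->m_epg_hdrlen/g' \
	    -e 's/->m_ext_pgs\.trail_len/->m_epg_trllen/g' \
	    -e 's/->m_ext_pgs\.first_pg_off/->m_epg_1st_off/g' \
	    -e 's/->m_ext_pgs\.last_pg_len/->m_epg_last_len/g' \
	    -e 's/->m_ext_pgs\.npgs/->m_epg_npgs/g' \
	    -e 's/->m_ext_pgs\.seqno/->m_epg_seqno/g' \
	    -e 's/->m_ext_pgs\.tls/->m_epg_tls/g' \
	    "$f"
done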
diff --git a/sys/dev/cxgbe/crypto/t4_kern_tls.c b/sys/dev/cxgbe/crypto/t4_kern_tls.c
index 2878be1426ff..9e190f59a2ba 100644
--- a/sys/dev/cxgbe/crypto/t4_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t4_kern_tls.c
@@ -922,8 +922,8 @@ ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
* trim the length to avoid sending any of the trailer. There
* is no way to send a partial trailer currently.
*/
- if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len)
- mlen = TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len;
+ if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen)
+ mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen;
/*
@@ -964,7 +964,7 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
MPASS(mlen < TLS_HEADER_LENGTH + plen);
#endif
- if (mtod(m_tls, vm_offset_t) <= m_tls->m_ext_pgs.hdr_len)
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
return (0);
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
/*
@@ -975,8 +975,8 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
* the offset at the last byte of the record payload
* to send the last cipher block.
*/
- offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_ext_pgs.hdr_len,
- (plen - TLS_HEADER_LENGTH - m_tls->m_ext_pgs.trail_len) - 1);
+ offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
+ (plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1);
return (rounddown(offset, AES_BLOCK_LEN));
}
return (0);
@@ -1009,7 +1009,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
* excluding header and trailer.
*/
tlen = ktls_tcp_payload_length(tlsp, m_tls);
- if (tlen <= m_tls->m_ext_pgs.hdr_len) {
+ if (tlen <= m_tls->m_epg_hdrlen) {
/*
* For requests that only want to send the TLS header,
* send a tunnelled packet as immediate data.
@@ -1035,7 +1035,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
}
hdr = (void *)m_tls->m_epg_hdr;
- plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
+ plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
if (tlen < plen) {
plen = tlen;
offset = ktls_payload_offset(tlsp, m_tls);
@@ -1052,14 +1052,14 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
*/
imm_len = 0;
if (offset == 0)
- imm_len += m_tls->m_ext_pgs.hdr_len;
+ imm_len += m_tls->m_epg_hdrlen;
if (plen == tlen)
imm_len += AES_BLOCK_LEN;
wr_len += roundup2(imm_len, 16);
/* TLS record payload via DSGL. */
- *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len + offset,
- plen - (m_tls->m_ext_pgs.hdr_len + offset));
+ *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
+ plen - (m_tls->m_epg_hdrlen + offset));
wr_len += ktls_sgl_size(*nsegsp);
wr_len = roundup2(wr_len, 16);
@@ -1595,18 +1595,18 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Locate the TLS header. */
MBUF_EXT_PGS_ASSERT(m_tls);
hdr = (void *)m_tls->m_epg_hdr;
- plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
+ plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
/* Determine how much of the TLS record to send. */
tlen = ktls_tcp_payload_length(tlsp, m_tls);
- if (tlen <= m_tls->m_ext_pgs.hdr_len) {
+ if (tlen <= m_tls->m_epg_hdrlen) {
/*
* For requests that only want to send the TLS header,
* send a tunnelled packet as immediate data.
*/
#ifdef VERBOSE_TRACES
CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
- __func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno);
+ __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
#endif
return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
tcp_seqno, pidx));
@@ -1616,7 +1616,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
offset = ktls_payload_offset(tlsp, m_tls);
#ifdef VERBOSE_TRACES
CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
- __func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno, offset);
+ __func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
#endif
if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
txq->kern_tls_fin_short++;
@@ -1671,10 +1671,10 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
*/
tx_max_offset = mtod(m_tls, vm_offset_t);
if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
- m_tls->m_ext_pgs.trail_len) {
+ m_tls->m_epg_trllen) {
/* Always send the full trailer. */
tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
- m_tls->m_ext_pgs.trail_len;
+ m_tls->m_epg_trllen;
}
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
tx_max_offset > TLS_HEADER_LENGTH) {
@@ -1789,15 +1789,15 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Recalculate 'nsegs' if cached value is not available. */
if (nsegs == 0)
- nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len +
- offset, plen - (m_tls->m_ext_pgs.hdr_len + offset));
+ nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
+ offset, plen - (m_tls->m_epg_hdrlen + offset));
/* Calculate the size of the TLS work request. */
twr_len = ktls_base_wr_size(tlsp);
imm_len = 0;
if (offset == 0)
- imm_len += m_tls->m_ext_pgs.hdr_len;
+ imm_len += m_tls->m_epg_hdrlen;
if (plen == tlen)
imm_len += AES_BLOCK_LEN;
twr_len += roundup2(imm_len, 16);
@@ -1913,13 +1913,13 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
cipher_stop = 0;
sec_pdu->pldlen = htobe32(16 + plen -
- (m_tls->m_ext_pgs.hdr_len + offset));
+ (m_tls->m_epg_hdrlen + offset));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
sec_pdu->ivgen_hdrlen = htobe32(
tlsp->scmd0_short.ivgen_hdrlen |
- V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_ext_pgs.hdr_len : 0));
+ V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0));
txq->kern_tls_short++;
} else {
@@ -1932,7 +1932,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
aad_start = 1;
aad_stop = TLS_HEADER_LENGTH;
iv_offset = TLS_HEADER_LENGTH + 1;
- cipher_start = m_tls->m_ext_pgs.hdr_len + 1;
+ cipher_start = m_tls->m_epg_hdrlen + 1;
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
cipher_stop = 0;
auth_start = cipher_start;
@@ -1971,7 +1971,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
- sec_pdu->scmd1 = htobe64(m_tls->m_ext_pgs.seqno);
+ sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
/* Key context */
out = (void *)(sec_pdu + 1);
@@ -2011,8 +2011,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
tx_data->rsvd = htobe32(tcp_seqno);
} else {
tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
- V_TX_LENGTH(tlen - (m_tls->m_ext_pgs.hdr_len + offset)));
- tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_ext_pgs.hdr_len + offset);
+ V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
+ tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
}
tx_data->flags = htobe32(F_TX_BYPASS);
if (last_wr && tcp->th_flags & TH_PUSH)
@@ -2021,8 +2021,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Populate the TLS header */
out = (void *)(tx_data + 1);
if (offset == 0) {
- memcpy(out, m_tls->m_epg_hdr, m_tls->m_ext_pgs.hdr_len);
- out += m_tls->m_ext_pgs.hdr_len;
+ memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
+ out += m_tls->m_epg_hdrlen;
}
/* AES IV for a short record. */
@@ -2057,8 +2057,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* SGL for record payload */
sglist_reset(txq->gl);
- if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_ext_pgs.hdr_len + offset,
- plen - (m_tls->m_ext_pgs.hdr_len + offset)) != 0) {
+ if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
+ plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
#ifdef INVARIANTS
panic("%s: failed to append sglist", __func__);
#endif
@@ -2080,7 +2080,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
else
txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
- (m_tls->m_ext_pgs.hdr_len + offset);
+ (m_tls->m_epg_hdrlen + offset);
}
txsd = &txq->sdesc[pidx];
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 5d3f83bbcc46..c385fb064d89 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -2423,11 +2423,11 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
off += skip;
len -= skip;
- if (m->m_ext_pgs.hdr_len != 0) {
- if (off >= m->m_ext_pgs.hdr_len) {
- off -= m->m_ext_pgs.hdr_len;
+ if (m->m_epg_hdrlen != 0) {
+ if (off >= m->m_epg_hdrlen) {
+ off -= m->m_epg_hdrlen;
} else {
- seglen = m->m_ext_pgs.hdr_len - off;
+ seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -2439,8 +2439,8 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
*nextaddr = paddr + seglen;
}
}
- pgoff = m->m_ext_pgs.first_pg_off;
- for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
+ pgoff = m->m_epg_1st_off;
+ for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -2459,7 +2459,7 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
pgoff = 0;
};
if (len != 0) {
- seglen = min(len, m->m_ext_pgs.trail_len - off);
+ seglen = min(len, m->m_epg_trllen - off);
len -= seglen;
paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
if (*nextaddr != paddr)
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 4c0f1a7bfc1c..bd9dddb46488 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -733,7 +733,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
if (m->m_flags & M_NOMAP) {
#ifdef KERN_TLS
- if (m->m_ext_pgs.tls != NULL) {
+ if (m->m_epg_tls != NULL) {
toep->flags |= TPF_KTLS;
if (plen == 0) {
SOCKBUF_UNLOCK(sb);
@@ -1934,7 +1934,7 @@ aiotx_free_pgs(struct mbuf *m)
m->m_len, jobtotid(job));
#endif
- for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
+ for (int i = 0; i < m->m_epg_npgs; i++) {
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_unwire(pg, PQ_ACTIVE);
}
@@ -1989,15 +1989,15 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
break;
}
- m->m_ext_pgs.first_pg_off = pgoff;
- m->m_ext_pgs.npgs = npages;
+ m->m_epg_1st_off = pgoff;
+ m->m_epg_npgs = npages;
if (npages == 1) {
KASSERT(mlen + pgoff <= PAGE_SIZE,
("%s: single page is too large (off %d len %d)",
__func__, pgoff, mlen));
- m->m_ext_pgs.last_pg_len = mlen;
+ m->m_epg_last_len = mlen;
} else {
- m->m_ext_pgs.last_pg_len = mlen - (PAGE_SIZE - pgoff) -
+ m->m_epg_last_len = mlen - (PAGE_SIZE - pgoff) -
(npages - 2) * PAGE_SIZE;
}
for (i = 0; i < npages; i++)
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index 085d7e839748..c40871d4f34d 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -1628,10 +1628,10 @@ count_ext_pgs_segs(struct mbuf *m)
vm_paddr_t nextpa;
u_int i, nsegs;
- MPASS(m->m_ext_pgs.npgs > 0);
+ MPASS(m->m_epg_npgs > 0);
nsegs = 1;
nextpa = m->m_epg_pa[0] + PAGE_SIZE;
- for (i = 1; i < m->m_ext_pgs.npgs; i++) {
+ for (i = 1; i < m->m_epg_npgs; i++) {
if (nextpa != m->m_epg_pa[i])
nsegs++;
nextpa = m->m_epg_pa[i] + PAGE_SIZE;
@@ -1653,11 +1653,11 @@ write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
V_ULPTX_NSGE(nsegs));
/* Figure out the first S/G length. */
- pa = m->m_epg_pa[0] + m->m_ext_pgs.first_pg_off;
+ pa = m->m_epg_pa[0] + m->m_epg_1st_off;
usgl->addr0 = htobe64(pa);
- len = m_epg_pagelen(m, 0, m->m_ext_pgs.first_pg_off);
+ len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
pa += len;
- for (i = 1; i < m->m_ext_pgs.npgs; i++) {
+ for (i = 1; i < m->m_epg_npgs; i++) {
if (m->m_epg_pa[i] != pa)
break;
len += m_epg_pagelen(m, i, 0);
@@ -1669,7 +1669,7 @@ write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
#endif
j = -1;
- for (; i < m->m_ext_pgs.npgs; i++) {
+ for (; i < m->m_epg_npgs; i++) {
if (j == -1 || m->m_epg_pa[i] != pa) {
if (j >= 0)
usgl->sge[j / 2].len[j & 1] = htobe32(len);
@@ -1798,7 +1798,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
KASSERT(m->m_flags & M_NOMAP, ("%s: mbuf %p is not NOMAP",
__func__, m));
- KASSERT(m->m_ext_pgs.tls != NULL,
+ KASSERT(m->m_epg_tls != NULL,
("%s: mbuf %p doesn't have TLS session", __func__, m));
/* Calculate WR length. */
@@ -1867,19 +1867,19 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
thdr = (struct tls_hdr *)&m->m_epg_hdr;
#ifdef VERBOSE_TRACES
CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
- __func__, toep->tid, m->m_ext_pgs.seqno, thdr->type,
+ __func__, toep->tid, m->m_epg_seqno, thdr->type,
m->m_len);
#endif
txwr = wrtod(wr);
cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
memset(txwr, 0, roundup2(wr_len, 16));
credits = howmany(wr_len, 16);
- expn_size = m->m_ext_pgs.hdr_len +
- m->m_ext_pgs.trail_len;
+ expn_size = m->m_epg_hdrlen +
+ m->m_epg_trllen;
tls_size = m->m_len - expn_size;
write_tlstx_wr(txwr, toep, 0,
tls_size, expn_size, 1, credits, shove, 1);
- toep->tls.tx_seq_no = m->m_ext_pgs.seqno;
+ toep->tls.tx_seq_no = m->m_epg_seqno;
write_tlstx_cpl(cpl, toep, thdr, tls_size, 1);
tls_copy_tx_key(toep, cpl + 1);