aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2021-03-26 22:05:31 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2021-03-26 22:19:58 +0000
commit077ba6a845fab8f1d3bd83e07f61730f202a46fc (patch)
treecd3b27a3278f7a0b4bd704feb8452a448f711064
parent720dc6bcb5a8c4283802576e2ef54f42b33fa8d4 (diff)
downloadsrc-077ba6a845fab8f1d3bd83e07f61730f202a46fc.tar.gz
src-077ba6a845fab8f1d3bd83e07f61730f202a46fc.zip
cxgbe: Add a struct sge_ofld_txq type.
This type mirrors struct sge_ofld_rxq and holds state for TCP offload transmit queues. Currently it only holds a work queue but will include additional state in future changes.

Reviewed by:	np
Sponsored by:	Chelsio Communications
Differential Revision:	https://reviews.freebsd.org/D29382
-rw-r--r--sys/dev/cxgbe/adapter.h11
-rw-r--r--sys/dev/cxgbe/cxgbei/icl_cxgbei.c10
-rw-r--r--sys/dev/cxgbe/iw_cxgbe/qp.c6
-rw-r--r--sys/dev/cxgbe/offload.h2
-rw-r--r--sys/dev/cxgbe/t4_main.c15
-rw-r--r--sys/dev/cxgbe/t4_sge.c75
-rw-r--r--sys/dev/cxgbe/tom/t4_cpl_io.c25
-rw-r--r--sys/dev/cxgbe/tom/t4_listen.c9
-rw-r--r--sys/dev/cxgbe/tom/t4_tls.c8
-rw-r--r--sys/dev/cxgbe/tom/t4_tom.h6
10 files changed, 107 insertions, 60 deletions
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 1a90560a55d8..ed36c1e546d3 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -677,8 +677,8 @@ struct wrq_cookie {
};
/*
- * wrq: SGE egress queue that is given prebuilt work requests. Both the control
- * and offload tx queues are of this type.
+ * wrq: SGE egress queue that is given prebuilt work requests. Control queues
+ * are of this type.
*/
struct sge_wrq {
struct sge_eq eq; /* MUST be first */
@@ -712,6 +712,11 @@ struct sge_wrq {
} __aligned(CACHE_LINE_SIZE);
+/* ofld_txq: SGE egress queue + miscellaneous items */
+struct sge_ofld_txq {
+ struct sge_wrq wrq;
+} __aligned(CACHE_LINE_SIZE);
+
#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
/* Items used by the driver rx ithread are in this cacheline. */
@@ -792,7 +797,7 @@ struct sge {
struct sge_wrq *ctrlq; /* Control queues */
struct sge_txq *txq; /* NIC tx queues */
struct sge_rxq *rxq; /* NIC rx queues */
- struct sge_wrq *ofld_txq; /* TOE tx queues */
+ struct sge_ofld_txq *ofld_txq; /* TOE tx queues */
struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */
struct sge_nm_txq *nm_txq; /* netmap tx queues */
struct sge_nm_rxq *nm_rxq; /* netmap rx queues */
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index 6292dfc8dc75..94963f13b601 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -551,7 +551,7 @@ send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
- wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -843,8 +843,8 @@ no_ddp:
goto no_ddp;
}
- rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid, prsv,
- (vm_offset_t)csio->data_ptr, csio->dxfer_len);
+ rc = t4_write_page_pods_for_buf(sc, &toep->ofld_txq->wrq, toep->tid,
+ prsv, (vm_offset_t)csio->data_ptr, csio->dxfer_len);
if (rc != 0) {
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
@@ -957,8 +957,8 @@ no_ddp:
goto no_ddp;
}
- rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
- prsv, buf, xferlen);
+ rc = t4_write_page_pods_for_buf(sc, &toep->ofld_txq->wrq,
+ toep->tid, prsv, buf, xferlen);
if (rc != 0) {
t4_free_page_pods(prsv);
uma_zfree(prsv_zone, prsv);
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 43acb246e7bc..f999254a748c 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -1127,7 +1127,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
qhp->wq.sq.qid, qhp->ep->hwtid);
- wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+ wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
if (wr == NULL)
return;
wqe = wrtod(wr);
@@ -1259,7 +1259,7 @@ rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
qhp->wq.sq.qid, ep, ep->hwtid);
- wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+ wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
if (wr == NULL)
return (0);
wqe = wrtod(wr);
@@ -1353,7 +1353,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp,
qhp->wq.sq.qid, ep, ep->hwtid);
- wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+ wr = alloc_wrqe(sizeof(*wqe), &toep->ofld_txq->wrq);
if (wr == NULL)
return (0);
wqe = wrtod(wr);
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index 968902cb10da..e264882fb5b4 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -96,7 +96,7 @@ struct cxgbe_rate_tag {
int etid;
struct mbufq pending_tx, pending_fwack;
int plen;
- struct sge_wrq *eo_txq;
+ struct sge_ofld_txq *eo_txq;
uint32_t ctrl0;
uint16_t iqid;
int8_t schedcl;
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index cdfceb5573fd..5d06d4b55d8a 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1334,7 +1334,7 @@ t4_attach(device_t dev)
s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
s->neq += s->nofldtxq;
- s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
+ s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq),
M_CXGBE, M_ZERO | M_WAITOK);
}
#endif
@@ -6103,7 +6103,7 @@ vi_full_uninit(struct vi_info *vi)
struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
#endif
if (vi->flags & VI_INIT_DONE) {
@@ -6120,7 +6120,7 @@ vi_full_uninit(struct vi_info *vi)
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
for_each_ofld_txq(vi, i, ofld_txq) {
- quiesce_wrq(sc, ofld_txq);
+ quiesce_wrq(sc, &ofld_txq->wrq);
}
#endif
@@ -10672,6 +10672,9 @@ clear_stats(struct adapter *sc, u_int port_id)
struct sge_rxq *rxq;
struct sge_txq *txq;
struct sge_wrq *wrq;
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+ struct sge_ofld_txq *ofld_txq;
+#endif
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
#endif
@@ -10759,9 +10762,9 @@ clear_stats(struct adapter *sc, u_int port_id)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
- for_each_ofld_txq(vi, i, wrq) {
- wrq->tx_wrs_direct = 0;
- wrq->tx_wrs_copied = 0;
+ for_each_ofld_txq(vi, i, ofld_txq) {
+ ofld_txq->wrq.tx_wrs_direct = 0;
+ ofld_txq->wrq.tx_wrs_copied = 0;
}
#endif
#ifdef TCP_OFFLOAD
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index b0f5b272410a..1f2abcb81078 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -270,6 +270,11 @@ static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct vi_info *, struct sge_txq *, int,
struct sysctl_oid *);
static int free_txq(struct vi_info *, struct sge_txq *);
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+static int alloc_ofld_txq(struct vi_info *, struct sge_ofld_txq *, int,
+ struct sysctl_oid *);
+static int free_ofld_txq(struct vi_info *, struct sge_ofld_txq *);
+#endif
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
@@ -1109,7 +1114,7 @@ t4_setup_vi_queues(struct vi_info *vi)
struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
#endif
#ifdef DEV_NETMAP
int saved_idx;
@@ -1228,26 +1233,20 @@ t4_setup_vi_queues(struct vi_info *vi)
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "tx queues for TOE/ETHOFLD");
for_each_ofld_txq(vi, i, ofld_txq) {
- struct sysctl_oid *oid2;
-
snprintf(name, sizeof(name), "%s ofld_txq%d",
device_get_nameunit(vi->dev), i);
if (vi->nofldrxq > 0) {
iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
- init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
+ init_eq(sc, &ofld_txq->wrq.eq, EQ_OFLD, vi->qsize_txq,
pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id,
name);
} else {
iqidx = vi->first_rxq + (i % vi->nrxq);
- init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
+ init_eq(sc, &ofld_txq->wrq.eq, EQ_OFLD, vi->qsize_txq,
pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name);
}
- snprintf(name, sizeof(name), "%d", i);
- oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
- name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");
-
- rc = alloc_wrq(sc, vi, ofld_txq, oid2);
+ rc = alloc_ofld_txq(vi, ofld_txq, i, oid);
if (rc != 0)
goto done;
}
@@ -1269,9 +1268,7 @@ t4_teardown_vi_queues(struct vi_info *vi)
struct sge_rxq *rxq;
struct sge_txq *txq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
- struct port_info *pi = vi->pi;
- struct adapter *sc = pi->adapter;
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
@@ -1309,7 +1306,7 @@ t4_teardown_vi_queues(struct vi_info *vi)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
for_each_ofld_txq(vi, i, ofld_txq) {
- free_wrq(sc, ofld_txq);
+ free_ofld_txq(vi, ofld_txq);
}
#endif
@@ -4482,6 +4479,44 @@ free_txq(struct vi_info *vi, struct sge_txq *txq)
return (0);
}
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+static int
+alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx,
+ struct sysctl_oid *oid)
+{
+ struct adapter *sc = vi->adapter;
+ struct sysctl_oid_list *children;
+ char name[16];
+ int rc;
+
+ children = SYSCTL_CHILDREN(oid);
+
+ snprintf(name, sizeof(name), "%d", idx);
+ oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");
+
+ rc = alloc_wrq(sc, vi, &ofld_txq->wrq, oid);
+ if (rc != 0)
+ return (rc);
+
+ return (rc);
+}
+
+static int
+free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq)
+{
+ struct adapter *sc = vi->adapter;
+ int rc;
+
+ rc = free_wrq(sc, &ofld_txq->wrq);
+ if (rc != 0)
+ return (rc);
+
+ bzero(ofld_txq, sizeof(*ofld_txq));
+ return (0);
+}
+#endif
+
static void
oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
@@ -6108,7 +6143,7 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
EO_FLOWC_PENDING);
- flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie);
+ flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie);
if (__predict_false(flowc == NULL))
return (ENOMEM);
@@ -6130,7 +6165,7 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
flowc->mnemval[5].val = htobe32(cst->schedcl);
- commit_wrq_wr(cst->eo_txq, flowc, &cookie);
+ commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
cst->flags &= ~EO_FLOWC_PENDING;
cst->flags |= EO_FLOWC_RPL_PENDING;
@@ -6150,7 +6185,7 @@ send_etid_flush_wr(struct cxgbe_rate_tag *cst)
mtx_assert(&cst->lock, MA_OWNED);
- flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie);
+ flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie);
if (__predict_false(flowc == NULL))
CXGBE_UNIMPLEMENTED(__func__);
@@ -6160,7 +6195,7 @@ send_etid_flush_wr(struct cxgbe_rate_tag *cst)
flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
V_FW_WR_FLOWID(cst->etid));
- commit_wrq_wr(cst->eo_txq, flowc, &cookie);
+ commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
cst->flags |= EO_FLUSH_RPL_PENDING;
MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
@@ -6345,7 +6380,7 @@ ethofld_tx(struct cxgbe_rate_tag *cst)
MPASS(cst->ncompl > 0);
return;
}
- wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
+ wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie);
if (__predict_false(wr == NULL)) {
/* XXX: wishful thinking, not a real assertion. */
MPASS(cst->ncompl > 0);
@@ -6356,7 +6391,7 @@ ethofld_tx(struct cxgbe_rate_tag *cst)
compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
ETHER_BPF_MTAP(cst->com.ifp, m);
write_ethofld_wr(cst, wr, m, compl);
- commit_wrq_wr(cst->eo_txq, wr, &cookie);
+ commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie);
if (compl) {
cst->ncompl++;
cst->tx_nocompl = 0;
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index f1d4ce6825cc..14a8181b57ef 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -108,7 +108,7 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
- wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -202,7 +202,8 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
fw_flowc_mnemval);
flowclen16 = howmany(flowclen, 16);
if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0 ||
- (wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq)) == NULL) {
+ (wr = alloc_wrqe(roundup2(flowclen, 16),
+ &toep->ofld_txq->wrq)) == NULL) {
if (tc_idx >= 0)
t4_release_cl_rl(sc, port_id, tc_idx);
return (ENOMEM);
@@ -266,7 +267,7 @@ send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %d.", __func__, tid));
- wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
+ wr = alloc_wrqe(sizeof(*req), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -491,7 +492,7 @@ t4_close_conn(struct adapter *sc, struct toepcb *toep)
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
("%s: flowc_wr not sent for tid %u.", __func__, tid));
- wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
+ wr = alloc_wrqe(sizeof(*req), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -823,7 +824,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
/* Immediate data tx */
wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
- toep->ofld_txq);
+ &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
@@ -841,7 +842,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
- wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(wr_len, 16),
+ &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
@@ -1018,7 +1020,7 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
/* Immediate data tx */
wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
- toep->ofld_txq);
+ &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
@@ -1036,7 +1038,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
/* DSGL tx */
wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
- wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(wr_len, 16),
+ &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
@@ -1351,13 +1354,13 @@ done:
}
void
-send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
+send_abort_rpl(struct adapter *sc, struct sge_ofld_txq *ofld_txq, int tid,
int rst_status)
{
struct wrqe *wr;
struct cpl_abort_rpl *cpl;
- wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
+ wr = alloc_wrqe(sizeof(*cpl), &ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -1397,7 +1400,7 @@ do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
unsigned int tid = GET_TID(cpl);
struct toepcb *toep = lookup_tid(sc, tid);
- struct sge_wrq *ofld_txq = toep->ofld_txq;
+ struct sge_ofld_txq *ofld_txq = toep->ofld_txq;
struct inpcb *inp;
struct tcpcb *tp;
struct epoch_tracker et;
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index 51de83643253..9cf527925fcc 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -350,7 +350,7 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
struct port_info *pi = vi->pi;
struct wrqe *wr;
struct fw_flowc_wr *flowc;
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
const int nparams = 6;
const int flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
@@ -362,7 +362,7 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx];
ofld_rxq = &sc->sge.ofld_rxq[synqe->params.rxq_idx];
- wr = alloc_wrqe(roundup2(flowclen, 16), ofld_txq);
+ wr = alloc_wrqe(roundup2(flowclen, 16), &ofld_txq->wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -411,7 +411,8 @@ send_abort_rpl_synqe(struct toedev *tod, struct synq_entry *synqe,
if (!(synqe->flags & TPF_FLOWC_WR_SENT))
send_flowc_wr_synqe(sc, synqe);
- wr = alloc_wrqe(sizeof(*req), &sc->sge.ofld_txq[synqe->params.txq_idx]);
+ wr = alloc_wrqe(sizeof(*req),
+ &sc->sge.ofld_txq[synqe->params.txq_idx].wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
@@ -885,7 +886,7 @@ do_abort_req_synqe(struct sge_iq *iq, const struct rss_header *rss,
struct synq_entry *synqe = lookup_tid(sc, tid);
struct listen_ctx *lctx = synqe->lctx;
struct inpcb *inp = lctx->inp;
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
#ifdef INVARIANTS
unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index 4016a4f1995a..fff42386fab7 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -70,7 +70,7 @@ t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
{
struct adapter *sc = td_adapter(toep->td);
- t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
+ t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
}
/* TLS and DTLS common routines */
@@ -518,7 +518,7 @@ tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
}
- wr = alloc_wrqe(len, toep->ofld_txq);
+ wr = alloc_wrqe(len, &toep->ofld_txq->wrq);
if (wr == NULL) {
free_keyid(toep, keyid);
return (ENOMEM);
@@ -1596,7 +1596,7 @@ t4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
}
- wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
@@ -1907,7 +1907,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
if (__predict_false(toep->flags & TPF_FIN_SENT))
panic("%s: excess tx.", __func__);
- wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
+ wr = alloc_wrqe(roundup2(wr_len, 16), &toep->ofld_txq->wrq);
if (wr == NULL) {
/* XXX: how will we recover from this? */
toep->flags |= TPF_TX_SUSPENDED;
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 41187aa27e2d..628857cfae17 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -185,7 +185,7 @@ struct toepcb {
int refcount;
struct vnet *vnet;
struct vi_info *vi; /* virtual interface */
- struct sge_wrq *ofld_txq;
+ struct sge_ofld_txq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
struct sge_wrq *ctrlq;
struct l2t_entry *l2te; /* L2 table entry used by this connection */
@@ -396,7 +396,7 @@ void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
-void send_abort_rpl(struct adapter *, struct sge_wrq *, int , int);
+void send_abort_rpl(struct adapter *, struct sge_ofld_txq *, int , int);
void send_flowc_wr(struct toepcb *, struct tcpcb *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
int send_rx_credits(struct adapter *, struct toepcb *, int);
@@ -422,7 +422,7 @@ int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
struct pageset *);
-int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
+int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int,
struct ppod_reservation *, vm_offset_t, int);
void t4_free_page_pods(struct ppod_reservation *);
int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,