Diffstat (limited to 'sys/dev/cxgbe/t4_sge.c')
-rw-r--r--	sys/dev/cxgbe/t4_sge.c | 209
1 file changed, 155 insertions, 54 deletions
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 86454bc4fe10..2f9cb1a4ebb5 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1,8 +1,7 @@
 /*-
  * SPDX-License-Identifier: BSD-2-Clause
  *
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
  * Written by: Navdeep Parhar <np@FreeBSD.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -259,17 +258,20 @@ static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
 static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
     struct sge_ofld_rxq *);
 #endif
-static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ctrl_eq_alloc(struct adapter *, struct sge_eq *, int);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+    int);
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+    int);
 #endif
 static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
     struct sysctl_oid *);
 static void free_eq(struct adapter *, struct sge_eq *);
 static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
     struct sysctl_oid *, struct sge_eq *);
-static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
+static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *,
+    int);
 static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
 static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
     struct sysctl_ctx_list *, struct sysctl_oid *);
@@ -348,6 +350,7 @@ cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
 cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
 cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
 cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
+cpl_handler_t fw6_pld_handlers[NUM_CPL_FW6_COOKIES];
 
 void
 t4_register_an_handler(an_handler_t h)
@@ -477,6 +480,21 @@ fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 	return (fw4_ack_handlers[cookie](iq, rss, m));
 }
 
+static int
+fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+	const struct cpl_fw6_pld *cpl;
+	uint64_t cookie;
+
+	if (m != NULL)
+		cpl = mtod(m, const void *);
+	else
+		cpl = (const void *)(rss + 1);
+	cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK;
+
+	return (fw6_pld_handlers[cookie](iq, rss, m));
+}
+
 static void
 t4_init_shared_cpl_handlers(void)
 {
@@ -486,6 +504,7 @@ t4_init_shared_cpl_handlers(void)
 	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
 	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
 	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
+	t4_register_cpl_handler(CPL_FW6_PLD, fw6_pld_handler);
 }
 
 void
@@ -494,8 +513,12 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
 	uintptr_t *loc;
 
 	MPASS(opcode < nitems(t4_cpl_handler));
-	MPASS(cookie > CPL_COOKIE_RESERVED);
-	MPASS(cookie < NUM_CPL_COOKIES);
+	if (opcode == CPL_FW6_PLD) {
+		MPASS(cookie < NUM_CPL_FW6_COOKIES);
+	} else {
+		MPASS(cookie > CPL_COOKIE_RESERVED);
+		MPASS(cookie < NUM_CPL_COOKIES);
+	}
 	MPASS(t4_cpl_handler[opcode] != NULL);
 
 	switch (opcode) {
@@ -514,6 +537,9 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
 	case CPL_FW4_ACK:
 		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
 		break;
+	case CPL_FW6_PLD:
+		loc = (uintptr_t *)&fw6_pld_handlers[cookie];
+		break;
 	default:
 		MPASS(0);
 		return;
@@ -1064,9 +1090,9 @@ t4_setup_adapter_queues(struct adapter *sc)
 	 */
 
 	/*
-	 * Control queues, one per port.
+	 * Control queues.  At least one per port and per internal core.
 	 */
-	for_each_port(sc, i) {
+	for (i = 0; i < sc->sge.nctrlq; i++) {
 		rc = alloc_ctrlq(sc, i);
 		if (rc != 0)
 			return (rc);
@@ -1087,7 +1113,7 @@ t4_teardown_adapter_queues(struct adapter *sc)
 
 	if (sc->sge.ctrlq != NULL) {
 		MPASS(!(sc->flags & IS_VF));	/* VFs don't allocate ctrlq. */
-		for_each_port(sc, i)
+		for (i = 0; i < sc->sge.nctrlq; i++)
 			free_ctrlq(sc, i);
 	}
 	free_fwq(sc);
@@ -2701,9 +2727,14 @@ restart:
 #endif
 #ifdef KERN_TLS
 	if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
+		struct vi_info *vi = if_getsoftc(mst->ifp);
+
 		cflags |= MC_TLS;
 		set_mbuf_cflags(m0, cflags);
-		rc = t6_ktls_parse_pkt(m0);
+		if (is_t6(vi->pi->adapter))
+			rc = t6_ktls_parse_pkt(m0);
+		else
+			rc = t7_ktls_parse_pkt(m0);
 		if (rc != 0)
 			goto fail;
 		return (EINPROGRESS);
@@ -3273,7 +3304,10 @@ skip_coalescing:
 #ifdef KERN_TLS
 		} else if (mbuf_cflags(m0) & MC_TLS) {
 			ETHER_BPF_MTAP(ifp, m0);
-			n = t6_ktls_write_wr(txq, wr, m0, avail);
+			if (is_t6(sc))
+				n = t6_ktls_write_wr(txq, wr, m0, avail);
+			else
+				n = t7_ktls_write_wr(txq, wr, m0, avail);
 #endif
 		} else {
 			ETHER_BPF_MTAP(ifp, m0);
@@ -3414,6 +3448,7 @@ init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
 	eq->type = eqtype;
 	eq->port_id = port_id;
 	eq->tx_chan = sc->port[port_id]->tx_chan;
+	eq->hw_port = sc->port[port_id]->hw_port;
 	eq->iq = iq;
 	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
@@ -3577,7 +3612,7 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
 	    V_FW_IQ_CMD_VIID(vi->viid) |
 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
-	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
 	    F_FW_IQ_CMD_IQGTSMODE |
 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
 	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
@@ -3585,7 +3620,13 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
 	c.iqaddr = htobe64(iq->ba);
 	c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
 	if (iq->cong_drop != -1) {
-		cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
+		if (iq->qtype == IQ_ETH) {
+			if (chip_id(sc) >= CHELSIO_T7)
+				cong_map = 1 << pi->hw_port;
+			else
+				cong_map = pi->rx_e_chan_map;
+		} else
+			cong_map = 0;
 		c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
 	}
@@ -3842,7 +3883,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
 	struct sysctl_oid *oid;
 	struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
 
-	MPASS(idx < sc->params.nports);
+	MPASS(idx < sc->sge.nctrlq);
 
 	if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
 		MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
@@ -3854,8 +3895,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
 		snprintf(name, sizeof(name), "%s ctrlq%d",
 		    device_get_nameunit(sc->dev), idx);
-		init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
-		    &sc->sge.fwq, name);
+		init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
+		    idx % sc->params.nports, &sc->sge.fwq, name);
 		rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
 		if (rc != 0) {
 			CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
@@ -3870,7 +3911,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
 		MPASS(ctrlq->nwr_pending == 0);
 		MPASS(ctrlq->ndesc_needed == 0);
 
-		rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
+		rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx);
 		if (rc != 0) {
 			CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
 			return (rc);
@@ -3938,14 +3979,19 @@ t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
 	    V_FW_PARAMS_PARAM_YZ(cntxt_id);
-	val = V_CONMCTXT_CNGTPMODE(cong_mode);
-	if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
-	    cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
-		for (i = 0, ch_map = 0; i < 4; i++) {
-			if (cong_map & (1 << i))
-				ch_map |= 1 << (i << cng_ch_bits_log);
+	if (chip_id(sc) >= CHELSIO_T7) {
+		val = V_T7_DMAQ_CONM_CTXT_CNGTPMODE(cong_mode) |
+		    V_T7_DMAQ_CONM_CTXT_CH_VEC(cong_map);
+	} else {
+		val = V_CONMCTXT_CNGTPMODE(cong_mode);
+		if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
+		    cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
+			for (i = 0, ch_map = 0; i < 4; i++) {
+				if (cong_map & (1 << i))
+					ch_map |= 1 << (i << cng_ch_bits_log);
+			}
+			val |= V_CONMCTXT_CNGCHMAP(ch_map);
 		}
-		val |= V_CONMCTXT_CNGCHMAP(ch_map);
 	}
 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 	if (rc != 0) {
@@ -4253,24 +4299,26 @@ qsize_to_fthresh(int qsize)
 }
 
 static int
-ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
+ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx)
 {
-	int rc, cntxt_id;
+	int rc, cntxt_id, core;
 	struct fw_eq_ctrl_cmd c;
 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
+	core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0;
 	bzero(&c, sizeof(c));
 
 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
 	    V_FW_EQ_CTRL_CMD_VFN(0));
 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+	    V_FW_EQ_CTRL_CMD_COREGROUP(core) |
 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
 	c.physeqid_pkd = htobe32(0);
 	c.fetchszm_to_iqid =
 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
-	    V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
+	    V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) |
 	    F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4282,8 +4330,8 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
 
 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
 	if (rc != 0) {
-		CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
-		    eq->tx_chan, rc);
+		CH_ERR(sc, "failed to create hw ctrlq for port %d: %d\n",
+		    eq->port_id, rc);
 		return (rc);
 	}
@@ -4299,24 +4347,26 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
 }
 
 static int
-eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
 {
-	int rc, cntxt_id;
+	int rc, cntxt_id, core;
 	struct fw_eq_eth_cmd c;
 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
+	core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0;
 	bzero(&c, sizeof(c));
 
 	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
 	    V_FW_EQ_ETH_CMD_VFN(0));
 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+	    V_FW_EQ_ETH_CMD_COREGROUP(core) |
 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
 	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
 	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
 	c.fetchszm_to_iqid =
 	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
-	    V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+	    V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
 	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4344,23 +4394,44 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
 }
 
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+/*
+ * ncores	number of uP cores.
+ * nq		number of queues for this VI
+ * idx		queue index
+ */
+static inline int
+qidx_to_core(int ncores, int nq, int idx)
+{
+	MPASS(nq % ncores == 0);
+	MPASS(idx >= 0 && idx < nq);
+
+	return (idx * ncores / nq);
+}
+
 static int
-ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq,
+    int idx)
 {
-	int rc, cntxt_id;
+	int rc, cntxt_id, core;
 	struct fw_eq_ofld_cmd c;
 	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
+	if (sc->params.tid_qid_sel_mask != 0)
+		core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx);
+	else
+		core = 0;
+
 	bzero(&c, sizeof(c));
 
 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
 	    V_FW_EQ_OFLD_CMD_VFN(0));
 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
+	    V_FW_EQ_OFLD_CMD_COREGROUP(core) |
 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
 	c.fetchszm_to_iqid =
 	    htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
-	    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
+	    V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) |
 	    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4449,7 +4520,7 @@ add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
 }
 
 static int
-alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
 {
 	int rc;
 
@@ -4464,16 +4535,16 @@
 
 	switch (eq->type) {
 	case EQ_CTRL:
-		rc = ctrl_eq_alloc(sc, eq);
+		rc = ctrl_eq_alloc(sc, eq, idx);
 		break;
 
 	case EQ_ETH:
-		rc = eth_eq_alloc(sc, vi, eq);
+		rc = eth_eq_alloc(sc, vi, eq, idx);
 		break;
 
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
 	case EQ_OFLD:
-		rc = ofld_eq_alloc(sc, vi, eq);
+		rc = ofld_eq_alloc(sc, vi, eq, idx);
 		break;
 #endif
@@ -4653,7 +4724,7 @@ failed:
 
 	if (!(eq->flags & EQ_HW_ALLOCATED)) {
 		MPASS(eq->flags & EQ_SW_ALLOCATED);
-		rc = alloc_eq_hwq(sc, vi, eq);
+		rc = alloc_eq_hwq(sc, vi, eq, idx);
 		if (rc != 0) {
 			CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
 			return (rc);
@@ -4678,10 +4749,10 @@ failed:
 
 		if (vi->flags & TX_USES_VM_WR)
 			txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			    V_TXPKT_INTF(pi->tx_chan));
+			    V_TXPKT_INTF(pi->hw_port));
 		else
 			txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+			    V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
 			    V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
 
 		txq->tc_idx = -1;
@@ -4788,18 +4859,46 @@ add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
 	    CTLFLAG_RD, &txq->kern_tls_waste,
 	    "# of octets DMAd but not transmitted in NIC TLS records");
-	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
-	    CTLFLAG_RD, &txq->kern_tls_options,
-	    "# of NIC TLS options-only packets transmitted");
 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
 	    CTLFLAG_RD, &txq->kern_tls_header,
 	    "# of NIC TLS header-only packets transmitted");
-	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
-	    CTLFLAG_RD, &txq->kern_tls_fin,
-	    "# of NIC TLS FIN-only packets transmitted");
 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
 	    CTLFLAG_RD, &txq->kern_tls_fin_short,
 	    "# of NIC TLS padded FIN packets on short TLS records");
+	if (is_t6(sc)) {
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_options", CTLFLAG_RD,
+		    &txq->kern_tls_options,
+		    "# of NIC TLS options-only packets transmitted");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
+		    "# of NIC TLS FIN-only packets transmitted");
+	} else {
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_ghash_received", CTLFLAG_RD,
+		    &txq->kern_tls_ghash_received,
+		    "# of NIC TLS GHASHes received");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_ghash_requested", CTLFLAG_RD,
+		    &txq->kern_tls_ghash_requested,
+		    "# of NIC TLS GHASHes requested");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_lso", CTLFLAG_RD,
+		    &txq->kern_tls_lso,
+		    "# of NIC TLS records transmitted using LSO");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_partial_ghash", CTLFLAG_RD,
+		    &txq->kern_tls_partial_ghash,
+		    "# of NIC TLS records encrypted using a partial GHASH");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_splitmode", CTLFLAG_RD,
+		    &txq->kern_tls_splitmode,
+		    "# of NIC TLS records using SplitMode");
+		SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+		    "kern_tls_trailer", CTLFLAG_RD,
+		    &txq->kern_tls_trailer,
+		    "# of NIC TLS trailer-only packets transmitted");
+	}
 	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
 	    CTLFLAG_RD, &txq->kern_tls_cbc,
 	    "# of NIC TLS sessions using AES-CBC");
@@ -4869,7 +4968,7 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
 		MPASS(eq->flags & EQ_SW_ALLOCATED);
 		MPASS(ofld_txq->wrq.nwr_pending == 0);
 		MPASS(ofld_txq->wrq.ndesc_needed == 0);
-		rc = alloc_eq_hwq(sc, vi, eq);
+		rc = alloc_eq_hwq(sc, vi, eq, idx);
 		if (rc != 0) {
 			CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
 			    rc);
@@ -5418,7 +5517,8 @@ write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
 	    m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
 	    m0->m_pkthdr.l5hlen) |
 	    V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
-	tnl_lso->r1 = 0;
+	tnl_lso->ipsecen_to_rocev2 = 0;
+	tnl_lso->roce_eth = 0;
 
 	/* Inner headers. */
 	ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
@@ -6583,10 +6683,11 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
 	    V_FW_WR_FLOWID(cst->etid));
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 	flowc->mnemval[0].val = htobe32(pfvf);
+	/* Firmware expects hw port and will translate to channel itself. */
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
-	flowc->mnemval[1].val = htobe32(pi->tx_chan);
+	flowc->mnemval[1].val = htobe32(pi->hw_port);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
-	flowc->mnemval[2].val = htobe32(pi->tx_chan);
+	flowc->mnemval[2].val = htobe32(pi->hw_port);
 	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
 	flowc->mnemval[3].val = htobe32(cst->iqid);
 	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
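
A note on the new shared CPL_FW6_PLD dispatch above: unlike the other shared opcodes, cookie 0 is accepted (the MPASS only bounds the cookie by NUM_CPL_FW6_COOKIES), and fw6_pld_handler() recovers the cookie from the low bits of the reply's data[1] word. A consumer would register roughly as sketched below; the handler name and MY_FW6_COOKIE are hypothetical stand-ins, not part of this commit.

/*
 * Hypothetical consumer of the shared CPL_FW6_PLD dispatch.  The handler
 * signature matches cpl_handler_t; MY_FW6_COOKIE stands in for whichever
 * cookie the firmware was told to echo back in data[1].
 */
static int
my_fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	/* fw6_pld_handler() has already matched the cookie to get here. */
	m_freem(m);
	return (0);
}

/* At attach time: */
MPASS(MY_FW6_COOKIE < NUM_CPL_FW6_COOKIES);
t4_register_shared_cpl_handler(CPL_FW6_PLD, my_fw6_pld_handler, MY_FW6_COOKIE);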
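
The t4_sge_set_conm_context() change is easier to see with a concrete value: T7 and later program the congestion map directly via V_T7_DMAQ_CONM_CTXT_CH_VEC(), while older chips keep the expansion loop that spaces each channel's bit cng_ch_bits_log positions apart. A standalone sketch of that pre-T7 expansion, with cng_ch_bits_log passed in as a parameter since it comes from the SGE parameters at runtime:

/*
 * Standalone version of the pre-T7 congestion channel-map expansion
 * retained in t4_sge_set_conm_context().
 */
#include <stdio.h>

static unsigned int
expand_ch_map(unsigned int cong_map, int cng_ch_bits_log)
{
	unsigned int ch_map = 0;

	for (int i = 0; i < 4; i++) {
		if (cong_map & (1 << i))
			ch_map |= 1 << (i << cng_ch_bits_log);
	}
	return (ch_map);
}

int
main(void)
{
	/* cong_map 0x5 (channels 0 and 2), 2 bits per channel: 0x11. */
	printf("0x%x\n", expand_ch_map(0x5, 1));
	return (0);
}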
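
Similarly, the qidx_to_core() helper added for the offload queues maps a VI's nq queues onto ncores uP cores in contiguous blocks rather than round-robin. A minimal userland version to check the mapping (kernel types and MPASS() replaced with assert() so it runs as plain C):

#include <assert.h>
#include <stdio.h>

static inline int
qidx_to_core(int ncores, int nq, int idx)
{
	assert(nq % ncores == 0);
	assert(idx >= 0 && idx < nq);

	return (idx * ncores / nq);
}

int
main(void)
{
	/* 8 queues on 4 cores: queues 0-1 -> core 0, 2-3 -> core 1, ... */
	for (int idx = 0; idx < 8; idx++)
		printf("queue %d -> core %d\n", idx, qidx_to_core(4, 8, idx));
	return (0);
}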