author     Navdeep Parhar <np@FreeBSD.org>  2021-06-22 05:07:56 +0000
committer  Navdeep Parhar <np@FreeBSD.org>  2021-06-25 23:04:09 +0000
commit     6beb67c7e0ad4c3f8277ed1122ef5efcde0a269c (patch)
tree       2274aec02d563eebea16381d11cb4e67e193d5da
parent     68c4481aac28f5a088553b9c31579f6fbc8500fd (diff)
cxgbe(4): Get the number of usable traffic classes from the firmware.
Recent firmwares are able to utilize the traffic classes of tx channels
that were previously unused.  This effectively doubles the number of
traffic classes available per port for 2-port cards.  Stop using the raw
per-channel value in the driver and ask the firmware for the number of
usable traffic classes instead.

MFC after:	2 weeks
Sponsored by:	Chelsio Communications
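For reference, the core of the change is the firmware probe added to
get_params__post_init() in t4_main.c (full hunk below).  A condensed sketch
of that probe-then-fallback pattern, with the param[]/val[] arrays of the
actual hunk simplified to scalars:

	uint32_t param, val;
	int rc;

	/* Ask the firmware how many traffic classes are usable per port. */
	param = FW_PARAM_DEV(NUM_TM_CLASS);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0) {
		/* nsched_cls is a uint8_t, so the value must fit in 8 bits. */
		MPASS(val > 0 && val < 256);
		sc->params.nsched_cls = val;
	} else {
		/* Older firmware: fall back to the static per-chip count. */
		sc->params.nsched_cls = sc->chip_params->nsched_cls;
	}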
-rw-r--r--  sys/dev/cxgbe/common/common.h  |  1
-rw-r--r--  sys/dev/cxgbe/t4_main.c        | 12
-rw-r--r--  sys/dev/cxgbe/t4_sched.c       | 28
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c  |  4
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c     | 11
5 files changed, 32 insertions(+), 24 deletions(-)
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index b803a7106a0c..c132cb779204 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -408,6 +408,7 @@ struct adapter_params {
bool dev_512sgl_mr; /* FW support for 512 SGL per FR MR */
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
unsigned int max_pkts_per_eth_tx_pkts_wr;
+ uint8_t nsched_cls; /* # of usable sched classes per port */
};
#define CHELSIO_T4 0x4
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index c5a3e6dd200e..4c69a152963a 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -5236,6 +5236,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.max_pkts_per_eth_tx_pkts_wr = 15;
+ param[0] = FW_PARAM_DEV(NUM_TM_CLASS);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0) {
+ MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */
+ sc->params.nsched_cls = val[0];
+ } else
+ sc->params.nsched_cls = sc->chip_params->nsched_cls;
+
/* get capabilites */
bzero(&caps, sizeof(caps));
caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
@@ -7851,7 +7859,7 @@ cxgbe_sysctls(struct port_info *pi)
SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
CTLFLAG_RW, &pi->sched_params->burstsize, 0,
"burstsize for per-flow cl-rl (0 means up to the driver)");
- for (i = 0; i < sc->chip_params->nsched_cls; i++) {
+ for (i = 0; i < sc->params.nsched_cls; i++) {
struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
snprintf(name, sizeof(name), "%d", i);
@@ -11675,7 +11683,7 @@ error:
if ((s->offload != 0 && s->offload != 1) ||
s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
s->sched_class < -1 ||
- s->sched_class >= sc->chip_params->nsched_cls) {
+ s->sched_class >= sc->params.nsched_cls) {
rc = EINVAL;
goto error;
}
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index b320ff4a7c68..827add3c27ec 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -171,7 +171,7 @@ set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
*/
if (p->cl < 0)
return (EINVAL);
- if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1))
+ if (!in_range(p->cl, 0, sc->params.nsched_cls - 1))
return (ERANGE);
}
@@ -243,7 +243,7 @@ update_tx_sched(void *context, int pending)
struct port_info *pi;
struct tx_cl_rl_params *tc;
struct adapter *sc = context;
- const int n = sc->chip_params->nsched_cls;
+ const int n = sc->params.nsched_cls;
mtx_lock(&sc->tc_lock);
for_each_port(sc, i) {
@@ -373,7 +373,7 @@ bind_txq_to_traffic_class(struct adapter *sc, struct sge_txq *txq, int idx)
txq->tc_idx = old_idx;
}
done:
- MPASS(txq->tc_idx >= -1 && txq->tc_idx < sc->chip_params->nsched_cls);
+ MPASS(txq->tc_idx >= -1 && txq->tc_idx < sc->params.nsched_cls);
mtx_unlock(&sc->tc_lock);
return (rc);
}
@@ -402,7 +402,7 @@ t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
MPASS(vi->ntxq > 0);
if (!in_range(p->queue, 0, vi->ntxq - 1) ||
- !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1))
+ !in_range(p->cl, 0, sc->params.nsched_cls - 1))
return (EINVAL);
if (p->queue < 0) {
@@ -431,7 +431,7 @@ int
t4_init_tx_sched(struct adapter *sc)
{
int i, j;
- const int n = sc->chip_params->nsched_cls;
+ const int n = sc->params.nsched_cls;
struct port_info *pi;
struct tx_cl_rl_params *tc;
@@ -507,7 +507,7 @@ t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
update = false;
mtx_lock(&sc->tc_lock);
- for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) {
+ for (i = 0; i < sc->params.nsched_cls; i++, tc++) {
if (fa < 0 && tc->refcount == 0 && !(tc->flags & CLRL_USER))
fa = i; /* first available */
@@ -526,7 +526,7 @@ t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
}
}
/* Not found */
- MPASS(i == sc->chip_params->nsched_cls);
+ MPASS(i == sc->params.nsched_cls);
if (fa != -1) {
tc = &pi->sched_params->cl_rl[fa];
tc->refcount = 1;
@@ -557,7 +557,7 @@ t4_release_cl_rl(struct adapter *sc, int port_id, int tc_idx)
struct tx_cl_rl_params *tc;
MPASS(port_id >= 0 && port_id < sc->params.nports);
- MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
+ MPASS(tc_idx >= 0 && tc_idx < sc->params.nsched_cls);
mtx_lock(&sc->tc_lock);
tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx];
@@ -584,7 +584,7 @@ sysctl_tc(SYSCTL_HANDLER_ARGS)
if (sc->flags & IS_VF)
return (EPERM);
- if (!in_range(tc_idx, 0, sc->chip_params->nsched_cls - 1))
+ if (!in_range(tc_idx, 0, sc->params.nsched_cls - 1))
return (EINVAL);
return (bind_txq_to_traffic_class(sc, txq, tc_idx));
@@ -610,7 +610,7 @@ sysctl_tc_params(SYSCTL_HANDLER_ARGS)
MPASS(port_id < sc->params.nports);
MPASS(sc->port[port_id] != NULL);
i = arg2 & 0xffff;
- MPASS(i < sc->chip_params->nsched_cls);
+ MPASS(i < sc->params.nsched_cls);
mtx_lock(&sc->tc_lock);
tc = sc->port[port_id]->sched_params->cl_rl[i];
@@ -772,7 +772,7 @@ cxgbe_rate_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
(params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
if (rc != 0)
return (rc);
- MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
+ MPASS(schedcl >= 0 && schedcl < sc->params.nsched_cls);
cst = malloc(sizeof(*cst), M_CXGBE, M_ZERO | M_NOWAIT);
if (cst == NULL) {
@@ -823,7 +823,7 @@ cxgbe_rate_tag_modify(struct m_snd_tag *mst,
struct adapter *sc = cst->adapter;
/* XXX: is schedcl -1 ok here? */
- MPASS(cst->schedcl >= 0 && cst->schedcl < sc->chip_params->nsched_cls);
+ MPASS(cst->schedcl >= 0 && cst->schedcl < sc->params.nsched_cls);
mtx_lock(&cst->lock);
MPASS(cst->flags & EO_SND_TAG_REF);
@@ -831,7 +831,7 @@ cxgbe_rate_tag_modify(struct m_snd_tag *mst,
(params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
if (rc != 0)
return (rc);
- MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);
+ MPASS(schedcl >= 0 && schedcl < sc->params.nsched_cls);
t4_release_cl_rl(sc, cst->port_id, cst->schedcl);
cst->schedcl = schedcl;
cst->max_rate = params->rate_limit.max_rate;
@@ -919,7 +919,7 @@ cxgbe_ratelimit_query(struct ifnet *ifp, struct if_ratelimit_query_results *q)
* the card's cclk.
*/
q->max_flows = sc->tids.netids;
- q->number_of_rates = sc->chip_params->nsched_cls;
+ q->number_of_rates = sc->params.nsched_cls;
q->min_segment_burst = 4; /* matches PKTSCHED_BURST in the firmware. */
#if 1
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index a1bc88bdea7f..3af127c6a3a3 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -102,7 +102,7 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
nparams++;
if (toep->params.tc_idx != -1) {
MPASS(toep->params.tc_idx >= 0 &&
- toep->params.tc_idx < sc->chip_params->nsched_cls);
+ toep->params.tc_idx < sc->params.nsched_cls);
nparams++;
}
@@ -189,7 +189,7 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
rc = t4_reserve_cl_rl_kbps(sc, port_id, kbps, &tc_idx);
if (rc != 0)
return (rc);
- MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
+ MPASS(tc_idx >= 0 && tc_idx < sc->params.nsched_cls);
}
if (toep->params.tc_idx != tc_idx) {
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index f5c2c6804aa2..d6162ed457c8 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -168,7 +168,7 @@ init_toepcb(struct vi_info *vi, struct toepcb *toep)
struct adapter *sc = pi->adapter;
struct tx_cl_rl_params *tc;
- if (cp->tc_idx >= 0 && cp->tc_idx < sc->chip_params->nsched_cls) {
+ if (cp->tc_idx >= 0 && cp->tc_idx < sc->params.nsched_cls) {
tc = &pi->sched_params->cl_rl[cp->tc_idx];
mtx_lock(&sc->tc_lock);
if (tc->flags & CLRL_ERR) {
@@ -1314,11 +1314,10 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
}
/* Tx traffic scheduling class. */
- if (s->sched_class >= 0 &&
- s->sched_class < sc->chip_params->nsched_cls) {
- cp->tc_idx = s->sched_class;
- } else
- cp->tc_idx = -1;
+ if (s->sched_class >= 0 && s->sched_class < sc->params.nsched_cls)
+ cp->tc_idx = s->sched_class;
+ else
+ cp->tc_idx = -1;
/* Nagle's algorithm. */
if (s->nagle >= 0)