Diffstat (limited to 'sys/dev/cxgbe/t4_sge.c')
-rw-r--r--  sys/dev/cxgbe/t4_sge.c  86
1 file changed, 82 insertions(+), 4 deletions(-)
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index 2f9cb1a4ebb5..e9754ace27c2 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -852,6 +852,11 @@ t4_tweak_chip_settings(struct adapter *sc)
/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
+ if (sc->nvmecaps != 0) {
+ /* Request DDP status bit for NVMe PDU completions. */
+ m |= F_NVME_TCP_DDP_VAL_EN;
+ v |= F_NVME_TCP_DDP_VAL_EN;
+ }
t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
@@ -1335,7 +1340,6 @@ t4_intr_err(void *arg)
{
struct adapter *sc = arg;
uint32_t v;
- const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
if (atomic_load_int(&sc->error_flags) & ADAP_FATAL_ERR)
return;
@@ -1346,7 +1350,7 @@ t4_intr_err(void *arg)
t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
}
- if (t4_slow_intr_handler(sc, verbose))
+ if (t4_slow_intr_handler(sc, sc->intr_flags))
t4_fatal_err(sc, false);
}
@@ -4170,6 +4174,20 @@ alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int idx,
ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
ofld_rxq->rx_iscsi_ddp_setup_error =
counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_no_stag =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_setup_error =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_octets = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_ddp_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_fl_octets = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_fl_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_invalid_headers = counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_header_digest_errors =
+ counter_u64_alloc(M_WAITOK);
+ ofld_rxq->rx_nvme_data_digest_errors =
+ counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_alloc = counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_reuse = counter_u64_alloc(M_WAITOK);
ofld_rxq->ddp_buffer_free = counter_u64_alloc(M_WAITOK);
@@ -4207,6 +4225,16 @@ free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED));
counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok);
counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_ok);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_no_stag);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_setup_error);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_octets);
+ counter_u64_free(ofld_rxq->rx_nvme_ddp_pdus);
+ counter_u64_free(ofld_rxq->rx_nvme_fl_octets);
+ counter_u64_free(ofld_rxq->rx_nvme_fl_pdus);
+ counter_u64_free(ofld_rxq->rx_nvme_invalid_headers);
+ counter_u64_free(ofld_rxq->rx_nvme_header_digest_errors);
+ counter_u64_free(ofld_rxq->rx_nvme_data_digest_errors);
counter_u64_free(ofld_rxq->ddp_buffer_alloc);
counter_u64_free(ofld_rxq->ddp_buffer_reuse);
counter_u64_free(ofld_rxq->ddp_buffer_free);
@@ -4218,12 +4246,12 @@ static void
add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
struct sge_ofld_rxq *ofld_rxq)
{
- struct sysctl_oid_list *children;
+ struct sysctl_oid_list *children, *top;
if (ctx == NULL || oid == NULL)
return;
- children = SYSCTL_CHILDREN(oid);
+ top = children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "rx_aio_ddp_jobs",
CTLFLAG_RD, &ofld_rxq->rx_aio_ddp_jobs, 0,
"# of aio_read(2) jobs completed via DDP");
@@ -4280,6 +4308,41 @@ add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "data_digest_errors",
CTLFLAG_RD, &ofld_rxq->rx_iscsi_data_digest_errors, 0,
"# of PDUs with invalid data digests");
+
+ oid = SYSCTL_ADD_NODE(ctx, top, OID_AUTO, "nvme",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE NVMe statistics");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_ok",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_ok,
+ "# of times DDP buffer was setup successfully");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_no_stag",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_no_stag,
+ "# of times STAG was not available for DDP buffer setup");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_error",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_setup_error,
+ "# of times DDP buffer setup failed");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_octets",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_octets,
+ "# of octets placed directly");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_pdus",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_ddp_pdus,
+ "# of PDUs with data placed directly");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fl_octets",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_octets,
+ "# of data octets delivered in freelist");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fl_pdus",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_fl_pdus,
+ "# of PDUs with data delivered in freelist");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "invalid_headers",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_invalid_headers,
+ "# of PDUs with invalid header field");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "header_digest_errors",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_header_digest_errors,
+ "# of PDUs with invalid header digests");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "data_digest_errors",
+ CTLFLAG_RD, &ofld_rxq->rx_nvme_data_digest_errors,
+ "# of PDUs with invalid data digests");
}
#endif
@@ -4957,6 +5020,9 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_iscsi_iso_wrs = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_pdus = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_octets = counter_u64_alloc(M_WAITOK);
+ ofld_txq->tx_nvme_iso_wrs = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_aio_jobs = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_aio_octets = counter_u64_alloc(M_WAITOK);
ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK);
@@ -5000,6 +5066,9 @@ free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq)
counter_u64_free(ofld_txq->tx_iscsi_pdus);
counter_u64_free(ofld_txq->tx_iscsi_octets);
counter_u64_free(ofld_txq->tx_iscsi_iso_wrs);
+ counter_u64_free(ofld_txq->tx_nvme_pdus);
+ counter_u64_free(ofld_txq->tx_nvme_octets);
+ counter_u64_free(ofld_txq->tx_nvme_iso_wrs);
counter_u64_free(ofld_txq->tx_aio_jobs);
counter_u64_free(ofld_txq->tx_aio_octets);
counter_u64_free(ofld_txq->tx_toe_tls_records);
@@ -5029,6 +5098,15 @@ add_ofld_txq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_iso_wrs",
CTLFLAG_RD, &ofld_txq->tx_iscsi_iso_wrs,
"# of iSCSI segmentation offload work requests");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_pdus",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_pdus,
+ "# of NVMe PDUs transmitted");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_octets",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_octets,
+ "# of payload octets in transmitted NVMe PDUs");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_nvme_iso_wrs",
+ CTLFLAG_RD, &ofld_txq->tx_nvme_iso_wrs,
+ "# of NVMe segmentation offload work requests");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_aio_jobs",
CTLFLAG_RD, &ofld_txq->tx_aio_jobs,
"# of zero-copy aio_write(2) jobs transmitted");