author		Navdeep Parhar <np@FreeBSD.org>	2021-04-28 04:33:10 +0000
committer	Navdeep Parhar <np@FreeBSD.org>	2021-04-28 05:48:51 +0000
commit		83b5cda106a2dc0c8ace1718485c2ef05c5aa62b (patch)
tree		431516d6a3b3fb180c9c7d3b3bb4b197a09e7dae
parent		f33f2365eeefa38730e365bf6813cd0b90c9e9bc (diff)
cxgbe(4): Add support for NIC suspend/resume and live reset.
Add suspend/resume callbacks to the driver and a live reset built
around them.  This commit covers the basic NIC and future commits will
expand this functionality to other stateful parts of the chip.

Suspend and resume operate on the chip (the t?nex nexus device) and
affect all its ports.  It is not possible to suspend/resume or reset
individual ports.  All these operations can be performed on a running
NIC.  A reset will look like a link bounce to the networking stack.

Here are some ways to exercise this functionality:

/* Manual suspend and resume. */
# devctl suspend t6nex0
# devctl resume t6nex0

/* Manual reset. */
# devctl reset t6nex0

/* Manual reset with driver sysctl. */
# sysctl dev.t6nex.0.reset=1

/* Automatic adapter reset on any fatal error. */
# hw.cxgbe.reset_on_fatal_err=1

Suspend disables the adapter (DMA, interrupts, and the port PHYs) and
marks the hardware as unavailable to the driver.  All ifnets associated
with the adapter are still visible to the kernel but operations that
require hardware interaction will fail with ENXIO.  All ifnets report
link-down while the adapter is suspended.

Resume will reattach to the card, reconfigure it as before, and
recreate the queues servicing the existing ifnets.  The ifnets are able
to send and receive traffic as soon as the link comes back up.

Reset is roughly the same as a suspend and a resume with at least one
of these events in between: D0->D3Hot->D0, FLR, PCIe link retrain.

MFC after:	1 month
Relnotes:	yes
Sponsored by:	Chelsio Communications
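For completeness, the reset sysctl shown above can also be driven from C
with sysctlbyname(3) instead of sysctl(8).  A minimal userland sketch,
assuming the t6nex0 nexus from the examples (the OID name comes straight
from the commit message; everything else is standard FreeBSD libc):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		int one = 1;

		/* Equivalent to: sysctl dev.t6nex.0.reset=1 */
		if (sysctlbyname("dev.t6nex.0.reset", NULL, NULL, &one,
		    sizeof(one)) == -1)
			err(1, "dev.t6nex.0.reset");
		printf("adapter reset requested\n");
		return (0);
	}

devctl(8) remains the simpler tool interactively; the C route is useful
from a monitoring daemon that wants to trigger a recovery reset
programmatically.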
-rw-r--r--	sys/dev/cxgbe/adapter.h		52
-rw-r--r--	sys/dev/cxgbe/t4_clip.c		2
-rw-r--r--	sys/dev/cxgbe/t4_filter.c	36
-rw-r--r--	sys/dev/cxgbe/t4_main.c		1586
-rw-r--r--	sys/dev/cxgbe/t4_sched.c	13
-rw-r--r--	sys/dev/cxgbe/t4_tracer.c	10
6 files changed, 1393 insertions, 306 deletions
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 10099fb47bc1..086918b7dff9 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -165,6 +165,7 @@ enum {
IS_VF = (1 << 7),
KERN_TLS_ON = (1 << 8), /* HW is configured for KERN_TLS */
CXGBE_BUSY = (1 << 9),
+ HW_OFF_LIMITS = (1 << 10), /* off limits to all except reset_thread */
/* port flags */
HAS_TRACEQ = (1 << 3),
@@ -955,13 +956,26 @@ struct adapter {
TAILQ_HEAD(, sge_fl) sfl;
struct callout sfl_callout;
- struct mtx reg_lock; /* for indirect register access */
+ /*
+ * Driver code that can run when the adapter is suspended must use this
+ * lock or a synchronized_op and check for HW_OFF_LIMITS before
+ * accessing hardware.
+ *
+ * XXX: could be changed to rwlock. wlock in suspend/resume and for
+ * indirect register access, rlock everywhere else.
+ */
+ struct mtx reg_lock;
struct memwin memwin[NUM_MEMWIN]; /* memory windows */
struct mtx tc_lock;
struct task tc_task;
+ struct task reset_task;
+ const void *reset_thread;
+ int num_resets;
+ int incarnation;
+
const char *last_op;
const void *last_op_thr;
int last_op_flags;
@@ -1051,24 +1065,34 @@ forwarding_intr_to_fwq(struct adapter *sc)
return (sc->intr_count == 1);
}
+/* Works reliably inside a sync_op or with reg_lock held. */
+static inline bool
+hw_off_limits(struct adapter *sc)
+{
+ return (__predict_false(sc->flags & HW_OFF_LIMITS));
+}
+
static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
return bus_space_read_4(sc->bt, sc->bh, reg);
}
static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
bus_space_write_4(sc->bt, sc->bh, reg, val);
}
static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
return bus_space_read_8(sc->bt, sc->bh, reg);
#else
@@ -1081,7 +1105,8 @@ t4_read_reg64(struct adapter *sc, uint32_t reg)
static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
bus_space_write_8(sc->bt, sc->bh, reg, val);
#else
@@ -1093,14 +1118,16 @@ t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
*val = pci_read_config(sc->dev, reg, 1);
}
static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
pci_write_config(sc->dev, reg, val, 1);
}
@@ -1108,27 +1135,32 @@ static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
*val = pci_read_config(sc->dev, reg, 2);
}
static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
pci_write_config(sc->dev, reg, val, 2);
}
static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
*val = pci_read_config(sc->dev, reg, 4);
}
static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
-
+ if (hw_off_limits(sc))
+ MPASS(curthread == sc->reset_thread);
pci_write_config(sc->dev, reg, val, 4);
}
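The new reg_lock comment above prescribes the access protocol the rest
of this patch follows: hold the lock (or be inside a synchronized_op)
and test HW_OFF_LIMITS before touching registers.  A standalone
userspace model of that pattern, with a pthread mutex and a fake
register file standing in for the driver pieces (none of these names
are driver API):

	#include <pthread.h>
	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HW_OFF_LIMITS	(1 << 10)	/* same bit as adapter.h above */

	struct adapter {
		pthread_mutex_t reg_lock;
		unsigned int flags;
		uint32_t regs[16];		/* fake register file */
	};

	/* Returns the value only when the hardware is usable. */
	static bool
	read_reg_checked(struct adapter *sc, int reg, uint32_t *val)
	{
		bool ok;

		pthread_mutex_lock(&sc->reg_lock);
		ok = (sc->flags & HW_OFF_LIMITS) == 0;
		if (ok)
			*val = sc->regs[reg];	/* stands in for t4_read_reg() */
		pthread_mutex_unlock(&sc->reg_lock);
		return (ok);
	}

	int
	main(void)
	{
		struct adapter sc = { .flags = 0 };
		uint32_t v;

		pthread_mutex_init(&sc.reg_lock, NULL);
		sc.regs[3] = 0xcafef00d;
		if (read_reg_checked(&sc, 3, &v))
			printf("reg 3 = 0x%08" PRIx32 "\n", v);
		sc.flags |= HW_OFF_LIMITS;	/* what t4_suspend() does, under the lock */
		if (!read_reg_checked(&sc, 3, &v))
			printf("hardware off limits, read refused\n");
		return (0);
	}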
diff --git a/sys/dev/cxgbe/t4_clip.c b/sys/dev/cxgbe/t4_clip.c
index a93271103998..da7b168721ab 100644
--- a/sys/dev/cxgbe/t4_clip.c
+++ b/sys/dev/cxgbe/t4_clip.c
@@ -171,7 +171,7 @@ update_clip(struct adapter *sc, void *arg __unused)
if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4clip"))
return;
- if (mtx_initialized(&sc->clip_table_lock))
+ if (mtx_initialized(&sc->clip_table_lock) && !hw_off_limits(sc))
update_clip_table(sc);
end_synchronized_op(sc, LOCK_HELD);
diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c
index cddd2c96a620..3972111b4897 100644
--- a/sys/dev/cxgbe/t4_filter.c
+++ b/sys/dev/cxgbe/t4_filter.c
@@ -522,6 +522,11 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (sc->tids.ftids_in_use > 0 || /* TCAM filters active */
sc->tids.hpftids_in_use > 0 || /* hi-pri TCAM filters active */
sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
@@ -568,6 +573,11 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
rc = EBUSY;
goto done;
@@ -589,20 +599,27 @@ static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
uint32_t tcb_addr;
+ uint64_t hits;
tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
- if (is_t4(sc)) {
- uint64_t hits;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ hits = 0;
+ else if (is_t4(sc)) {
+ uint64_t t;
- read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
- return (be64toh(hits));
+ read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
+ hits = be64toh(t);
} else {
- uint32_t hits;
+ uint32_t t;
- read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
- return (be32toh(hits));
+ read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
+ hits = be32toh(t);
}
+ mtx_unlock(&sc->reg_lock);
+
+ return (hits);
}
int
@@ -961,6 +978,11 @@ set_filter(struct adapter *sc, struct t4_filter *t)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
goto done;
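set_filter_mode(), set_filter_mask(), and set_filter() above all gain
the same early guard: enter the synchronized op, bail out with ENXIO if
the hardware is off limits, and only then touch the chip.  The
control-flow skeleton, reduced to a compilable sketch with
begin/end_synchronized_op stubbed out as comments:

	#include <errno.h>
	#include <stdbool.h>

	struct adapter { unsigned int flags; };
	#define HW_OFF_LIMITS	(1 << 10)

	static bool
	hw_off_limits(struct adapter *sc)
	{
		return ((sc->flags & HW_OFF_LIMITS) != 0);
	}

	/* Shape of the handlers after this commit: sync-op, guard, work. */
	static int
	some_filter_op(struct adapter *sc)
	{
		int rc = 0;

		/* rc = begin_synchronized_op(sc, ...); if (rc) return (rc); */
		if (hw_off_limits(sc)) {
			rc = ENXIO;		/* hardware is unavailable */
			goto done;
		}
		/* ... program the filter hardware here ... */
	done:
		/* end_synchronized_op(sc, 0); */
		return (rc);
	}

	int
	main(void)
	{
		struct adapter sc = { HW_OFF_LIMITS };

		return (some_filter_op(&sc) == ENXIO ? 0 : 1);
	}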
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 9a2a256d5dd2..da9d7565f3a3 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -100,12 +100,20 @@ static int t4_detach(device_t);
static int t4_child_location_str(device_t, device_t, char *, size_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
+static int t4_suspend(device_t);
+static int t4_resume(device_t);
+static int t4_reset_prepare(device_t, device_t);
+static int t4_reset_post(device_t, device_t);
static device_method_t t4_methods[] = {
DEVMETHOD(device_probe, t4_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location_str, t4_child_location_str),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
@@ -165,8 +173,12 @@ static device_method_t t5_methods[] = {
DEVMETHOD(device_probe, t5_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location_str, t4_child_location_str),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
@@ -200,8 +212,12 @@ static device_method_t t6_methods[] = {
DEVMETHOD(device_probe, t6_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
DEVMETHOD(bus_child_location_str, t4_child_location_str),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
DEVMETHOD(t4_is_main_ready, t4_ready),
DEVMETHOD(t4_read_port_device, t4_read_port_device),
@@ -596,6 +612,10 @@ static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN,
&t4_panic_on_fatal_err, 0, "panic on fatal errors");
+static int t4_reset_on_fatal_err = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
+ &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");
+
static int t4_tx_vm_wr = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
"Use VM work requests to transmit packets.");
@@ -794,6 +814,7 @@ static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
+static int sysctl_reset(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls(SYSCTL_HANDLER_ARGS);
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
@@ -829,6 +850,7 @@ static int notify_siblings(device_t, int);
static uint64_t vi_get_counter(struct ifnet *, ift_counter);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static void enable_vxlan_rx(struct adapter *);
+static void reset_adapter(void *, int);
struct {
uint16_t device;
@@ -1137,6 +1159,8 @@ t4_attach(device_t dev)
refcount_init(&sc->vxlan_refcount, 0);
+ TASK_INIT(&sc->reset_task, 0, reset_adapter, sc);
+
sc->ctrlq_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
@@ -1785,6 +1809,572 @@ t4_detach_common(device_t dev)
return (0);
}
+static inline bool
+ok_to_reset(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+ struct port_info *pi;
+ struct vi_info *vi;
+ int i, j;
+ const int caps = IFCAP_TOE | IFCAP_TXTLS | IFCAP_NETMAP | IFCAP_TXRTLMT;
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+ MPASS(!(sc->flags & IS_VF));
+
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ for_each_vi(pi, j, vi) {
+ if (vi->ifp->if_capenable & caps)
+ return (false);
+ }
+ }
+
+ if (atomic_load_int(&t->tids_in_use) > 0)
+ return (false);
+ if (atomic_load_int(&t->stids_in_use) > 0)
+ return (false);
+ if (atomic_load_int(&t->atids_in_use) > 0)
+ return (false);
+ if (atomic_load_int(&t->ftids_in_use) > 0)
+ return (false);
+ if (atomic_load_int(&t->hpftids_in_use) > 0)
+ return (false);
+ if (atomic_load_int(&t->etids_in_use) > 0)
+ return (false);
+
+ return (true);
+}
+
+static int
+t4_suspend(device_t dev)
+{
+ struct adapter *sc = device_get_softc(dev);
+ struct port_info *pi;
+ struct vi_info *vi;
+ struct ifnet *ifp;
+ struct sge_rxq *rxq;
+ struct sge_txq *txq;
+ struct sge_wrq *wrq;
+#ifdef TCP_OFFLOAD
+ struct sge_ofld_rxq *ofld_rxq;
+#endif
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+ struct sge_ofld_txq *ofld_txq;
+#endif
+ int rc, i, j, k;
+
+ CH_ALERT(sc, "suspend requested\n");
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
+ if (rc != 0)
+ return (ENXIO);
+
+ /* XXX: Can the kernel call suspend repeatedly without resume? */
+ MPASS(!hw_off_limits(sc));
+
+ if (!ok_to_reset(sc)) {
+ /* XXX: should list what resource is preventing suspend. */
+ CH_ERR(sc, "not safe to suspend.\n");
+ rc = EBUSY;
+ goto done;
+ }
+
+ /* No more DMA or interrupts. */
+ t4_shutdown_adapter(sc);
+
+ /* Quiesce all activity. */
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ pi->vxlan_tcam_entry = false;
+
+ PORT_LOCK(pi);
+ if (pi->up_vis > 0) {
+ /*
+ * t4_shutdown_adapter has already shut down all the
+ * PHYs but it also disables interrupts and DMA so there
+ * won't be a link interrupt. So we update the state
+ * manually and inform the kernel.
+ */
+ pi->link_cfg.link_ok = false;
+ t4_os_link_changed(pi);
+ }
+ PORT_UNLOCK(pi);
+
+ for_each_vi(pi, j, vi) {
+ vi->xact_addr_filt = -1;
+ if (!(vi->flags & VI_INIT_DONE))
+ continue;
+
+ ifp = vi->ifp;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ mtx_lock(&vi->tick_mtx);
+ vi->flags |= VI_SKIP_STATS;
+ callout_stop(&vi->tick);
+ mtx_unlock(&vi->tick_mtx);
+ callout_drain(&vi->tick);
+ }
+
+ /*
+ * Note that the HW is not available.
+ */
+ for_each_txq(vi, k, txq) {
+ TXQ_LOCK(txq);
+ txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED);
+ TXQ_UNLOCK(txq);
+ }
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+ for_each_ofld_txq(vi, k, ofld_txq) {
+ ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
+ }
+#endif
+ for_each_rxq(vi, k, rxq) {
+ rxq->iq.flags &= ~IQ_HW_ALLOCATED;
+ }
+#if defined(TCP_OFFLOAD)
+ for_each_ofld_rxq(vi, k, ofld_rxq) {
+ ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED;
+ }
+#endif
+
+ quiesce_vi(vi);
+ }
+
+ if (sc->flags & FULL_INIT_DONE) {
+ /* Control queue */
+ wrq = &sc->sge.ctrlq[i];
+ wrq->eq.flags &= ~EQ_HW_ALLOCATED;
+ quiesce_wrq(wrq);
+ }
+ }
+ if (sc->flags & FULL_INIT_DONE) {
+ /* Firmware event queue */
+ sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED;
+ quiesce_iq_fl(sc, &sc->sge.fwq, NULL);
+ }
+
+ /* Mark the adapter totally off limits. */
+ mtx_lock(&sc->reg_lock);
+ sc->flags |= HW_OFF_LIMITS;
+ sc->flags &= ~(FW_OK | MASTER_PF);
+ sc->reset_thread = NULL;
+ mtx_unlock(&sc->reg_lock);
+
+ sc->num_resets++;
+ CH_ALERT(sc, "suspend completed.\n");
+done:
+ end_synchronized_op(sc, 0);
+ return (rc);
+}
+
+struct adapter_pre_reset_state {
+ u_int flags;
+ uint16_t nbmcaps;
+ uint16_t linkcaps;
+ uint16_t switchcaps;
+ uint16_t niccaps;
+ uint16_t toecaps;
+ uint16_t rdmacaps;
+ uint16_t cryptocaps;
+ uint16_t iscsicaps;
+ uint16_t fcoecaps;
+
+ u_int cfcsum;
+ char cfg_file[32];
+
+ struct adapter_params params;
+ struct t4_virt_res vres;
+ struct tid_info tids;
+ struct sge sge;
+
+ int rawf_base;
+ int nrawf;
+
+};
+
+static void
+save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
+{
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ o->flags = sc->flags;
+
+ o->nbmcaps = sc->nbmcaps;
+ o->linkcaps = sc->linkcaps;
+ o->switchcaps = sc->switchcaps;
+ o->niccaps = sc->niccaps;
+ o->toecaps = sc->toecaps;
+ o->rdmacaps = sc->rdmacaps;
+ o->cryptocaps = sc->cryptocaps;
+ o->iscsicaps = sc->iscsicaps;
+ o->fcoecaps = sc->fcoecaps;
+
+ o->cfcsum = sc->cfcsum;
+ MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file));
+ memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file));
+
+ o->params = sc->params;
+ o->vres = sc->vres;
+ o->tids = sc->tids;
+ o->sge = sc->sge;
+
+ o->rawf_base = sc->rawf_base;
+ o->nrawf = sc->nrawf;
+}
+
+static int
+compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
+{
+ int rc = 0;
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ /* Capabilities */
+#define COMPARE_CAPS(c) do { \
+ if (o->c##caps != sc->c##caps) { \
+ CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
+ sc->c##caps); \
+ rc = EINVAL; \
+ } \
+} while (0)
+ COMPARE_CAPS(nbm);
+ COMPARE_CAPS(link);
+ COMPARE_CAPS(switch);
+ COMPARE_CAPS(nic);
+ COMPARE_CAPS(toe);
+ COMPARE_CAPS(rdma);
+ COMPARE_CAPS(crypto);
+ COMPARE_CAPS(iscsi);
+ COMPARE_CAPS(fcoe);
+#undef COMPARE_CAPS
+
+ /* Firmware config file */
+ if (o->cfcsum != sc->cfcsum) {
+ CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file,
+ o->cfcsum, sc->cfg_file, sc->cfcsum);
+ rc = EINVAL;
+ }
+
+#define COMPARE_PARAM(p, name) do { \
+ if (o->p != sc->p) { \
+ CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \
+ rc = EINVAL; \
+ } \
+} while (0)
+ COMPARE_PARAM(sge.iq_start, iq_start);
+ COMPARE_PARAM(sge.eq_start, eq_start);
+ COMPARE_PARAM(tids.ftid_base, ftid_base);
+ COMPARE_PARAM(tids.ftid_end, ftid_end);
+ COMPARE_PARAM(tids.nftids, nftids);
+ COMPARE_PARAM(vres.l2t.start, l2t_start);
+ COMPARE_PARAM(vres.l2t.size, l2t_size);
+ COMPARE_PARAM(sge.iqmap_sz, iqmap_sz);
+ COMPARE_PARAM(sge.eqmap_sz, eqmap_sz);
+ COMPARE_PARAM(tids.tid_base, tid_base);
+ COMPARE_PARAM(tids.hpftid_base, hpftid_base);
+ COMPARE_PARAM(tids.hpftid_end, hpftid_end);
+ COMPARE_PARAM(tids.nhpftids, nhpftids);
+ COMPARE_PARAM(rawf_base, rawf_base);
+ COMPARE_PARAM(nrawf, nrawf);
+ COMPARE_PARAM(params.mps_bg_map, mps_bg_map);
+ COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support);
+ COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl);
+ COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support);
+ COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr);
+ COMPARE_PARAM(tids.ntids, ntids);
+ COMPARE_PARAM(tids.etid_base, etid_base);
+ COMPARE_PARAM(tids.etid_end, etid_end);
+ COMPARE_PARAM(tids.netids, netids);
+ COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred);
+ COMPARE_PARAM(params.ethoffload, ethoffload);
+ COMPARE_PARAM(tids.natids, natids);
+ COMPARE_PARAM(tids.stid_base, stid_base);
+ COMPARE_PARAM(vres.ddp.start, ddp_start);
+ COMPARE_PARAM(vres.ddp.size, ddp_size);
+ COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred);
+ COMPARE_PARAM(vres.stag.start, stag_start);
+ COMPARE_PARAM(vres.stag.size, stag_size);
+ COMPARE_PARAM(vres.rq.start, rq_start);
+ COMPARE_PARAM(vres.rq.size, rq_size);
+ COMPARE_PARAM(vres.pbl.start, pbl_start);
+ COMPARE_PARAM(vres.pbl.size, pbl_size);
+ COMPARE_PARAM(vres.qp.start, qp_start);
+ COMPARE_PARAM(vres.qp.size, qp_size);
+ COMPARE_PARAM(vres.cq.start, cq_start);
+ COMPARE_PARAM(vres.cq.size, cq_size);
+ COMPARE_PARAM(vres.ocq.start, ocq_start);
+ COMPARE_PARAM(vres.ocq.size, ocq_size);
+ COMPARE_PARAM(vres.srq.start, srq_start);
+ COMPARE_PARAM(vres.srq.size, srq_size);
+ COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp);
+ COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter);
+ COMPARE_PARAM(vres.iscsi.start, iscsi_start);
+ COMPARE_PARAM(vres.iscsi.size, iscsi_size);
+ COMPARE_PARAM(vres.key.start, key_start);
+ COMPARE_PARAM(vres.key.size, key_size);
+#undef COMPARE_PARAM
+
+ return (rc);
+}
+
+static int
+t4_resume(device_t dev)
+{
+ struct adapter *sc = device_get_softc(dev);
+ struct adapter_pre_reset_state *old_state = NULL;
+ struct port_info *pi;
+ struct vi_info *vi;
+ struct ifnet *ifp;
+ struct sge_txq *txq;
+ int rc, i, j, k;
+
+ CH_ALERT(sc, "resume requested.\n");
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
+ if (rc != 0)
+ return (ENXIO);
+ MPASS(hw_off_limits(sc));
+ MPASS((sc->flags & FW_OK) == 0);
+ MPASS((sc->flags & MASTER_PF) == 0);
+ MPASS(sc->reset_thread == NULL);
+ sc->reset_thread = curthread;
+
+ /* Register access is expected to work by the time we're here. */
+ if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
+ CH_ERR(sc, "%s: can't read device registers\n", __func__);
+ rc = ENXIO;
+ goto done;
+ }
+
+ /* Restore memory window. */
+ setup_memwin(sc);
+
+ /* Go no further if recovery mode has been requested. */
+ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
+ CH_ALERT(sc, "recovery mode on resume.\n");
+ rc = 0;
+ mtx_lock(&sc->reg_lock);
+ sc->flags &= ~HW_OFF_LIMITS;
+ mtx_unlock(&sc->reg_lock);
+ goto done;
+ }
+
+ old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK);
+ save_caps_and_params(sc, old_state);
+
+ /* Reestablish contact with firmware and become the primary PF. */
+ rc = contact_firmware(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+ MPASS(sc->flags & FW_OK);
+
+ if (sc->flags & MASTER_PF) {
+ rc = partition_resources(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+ t4_intr_clear(sc);
+ }
+
+ rc = get_params__post_init(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ rc = set_params__post_init(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ rc = compare_caps_and_params(sc, old_state);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ MPASS(pi != NULL);
+ MPASS(pi->vi != NULL);
+ MPASS(pi->vi[0].dev == pi->dev);
+
+ rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
+ if (rc != 0) {
+ CH_ERR(sc,
+ "failed to re-initialize port %d: %d\n", i, rc);
+ goto done;
+ }
+ MPASS(sc->chan_map[pi->tx_chan] == i);
+
+ PORT_LOCK(pi);
+ fixup_link_config(pi);
+ build_medialist(pi);
+ PORT_UNLOCK(pi);
+ for_each_vi(pi, j, vi) {
+ if (IS_MAIN_VI(vi))
+ continue;
+ rc = alloc_extra_vi(sc, pi, vi);
+ if (rc != 0) {
+ CH_ERR(vi,
+ "failed to re-allocate extra VI: %d\n", rc);
+ goto done;
+ }
+ }
+ }
+
+ /*
+ * Interrupts and queues are about to be enabled and other threads will
+ * want to access the hardware too. It is safe to do so. Note that
+ * this thread is still in the middle of a synchronized_op.
+ */
+ mtx_lock(&sc->reg_lock);
+ sc->flags &= ~HW_OFF_LIMITS;
+ mtx_unlock(&sc->reg_lock);
+
+ if (sc->flags & FULL_INIT_DONE) {
+ rc = adapter_full_init(sc);
+ if (rc != 0) {
+ CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc);
+ goto done;
+ }
+
+ if (sc->vxlan_refcount > 0)
+ enable_vxlan_rx(sc);
+
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ for_each_vi(pi, j, vi) {
+ if (!(vi->flags & VI_INIT_DONE))
+ continue;
+ rc = vi_full_init(vi);
+ if (rc != 0) {
+ CH_ERR(vi, "failed to re-initialize "
+ "interface: %d\n", rc);
+ goto done;
+ }
+
+ ifp = vi->ifp;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ continue;
+ /*
+ * Note that we do not setup multicast addresses
+ * in the first pass. This ensures that the
+ * unicast DMACs for all VIs on all ports get an
+ * MPS TCAM entry.
+ */
+ rc = update_mac_settings(ifp, XGMAC_ALL &
+ ~XGMAC_MCADDRS);
+ if (rc != 0) {
+ CH_ERR(vi, "failed to re-configure MAC: %d\n", rc);
+ goto done;
+ }
+ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true,
+ true);
+ if (rc != 0) {
+ CH_ERR(vi, "failed to re-enable VI: %d\n", rc);
+ goto done;
+ }
+ for_each_txq(vi, k, txq) {
+ TXQ_LOCK(txq);
+ txq->eq.flags |= EQ_ENABLED;
+ TXQ_UNLOCK(txq);
+ }
+ mtx_lock(&vi->tick_mtx);
+ vi->flags &= ~VI_SKIP_STATS;
+ callout_schedule(&vi->tick, hz);
+ mtx_unlock(&vi->tick_mtx);
+ }
+ PORT_LOCK(pi);
+ if (pi->up_vis > 0) {
+ t4_update_port_info(pi);
+ fixup_link_config(pi);
+ build_medialist(pi);
+ apply_link_config(pi);
+ if (pi->link_cfg.link_ok)
+ t4_os_link_changed(pi);
+ }
+ PORT_UNLOCK(pi);
+ }
+
+ /* Now reprogram the L2 multicast addresses. */
+ for_each_port(sc, i) {
+ pi = sc->port[i];
+ for_each_vi(pi, j, vi) {
+ if (!(vi->flags & VI_INIT_DONE))
+ continue;
+ ifp = vi->ifp;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ continue;
+ rc = update_mac_settings(ifp, XGMAC_MCADDRS);
+ if (rc != 0) {
+ CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc);
+ rc = 0; /* carry on */
+ }
+ }
+ }
+ }
+done:
+ if (rc == 0) {
+ sc->incarnation++;
+ CH_ALERT(sc, "resume completed.\n");
+ }
+ end_synchronized_op(sc, 0);
+ free(old_state, M_CXGBE);
+ return (rc);
+}
+
+static int
+t4_reset_prepare(device_t dev, device_t child)
+{
+ struct adapter *sc = device_get_softc(dev);
+
+ CH_ALERT(sc, "reset_prepare.\n");
+ return (0);
+}
+
+static int
+t4_reset_post(device_t dev, device_t child)
+{
+ struct adapter *sc = device_get_softc(dev);
+
+ CH_ALERT(sc, "reset_post.\n");
+ return (0);
+}
+
+static void
+reset_adapter(void *arg, int pending)
+{
+ struct adapter *sc = arg;
+ int rc;
+
+ CH_ALERT(sc, "reset requested.\n");
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
+ if (rc != 0)
+ return;
+
+ if (hw_off_limits(sc)) {
+ CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
+ rc = ENXIO;
+ goto done;
+ }
+
+ if (!ok_to_reset(sc)) {
+ /* XXX: should list what resource is preventing reset. */
+ CH_ERR(sc, "not safe to reset.\n");
+ rc = EBUSY;
+ goto done;
+ }
+
+done:
+ end_synchronized_op(sc, 0);
+ if (rc != 0)
+ return; /* Error logged already. */
+
+ mtx_lock(&Giant);
+ rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
+ mtx_unlock(&Giant);
+ if (rc != 0)
+ CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
+ else
+ CH_ALERT(sc, "bus_reset_child succeeded.\n");
+}
+
static int
cxgbe_probe(device_t dev)
{
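compare_caps_and_params() above generates one comparison per capability
field with ## token pasting and # stringizing.  The same preprocessor
trick in a tiny standalone demo (the struct and its fields are
stand-ins, not the driver's):

	#include <stdio.h>

	struct state {
		unsigned int niccaps, toecaps;
	};

	#define COMPARE_CAPS(old, new, c) do {				\
		if ((old)->c##caps != (new)->c##caps)			\
			printf("%scaps 0x%04x -> 0x%04x\n", #c,		\
			    (old)->c##caps, (new)->c##caps);		\
	} while (0)

	int
	main(void)
	{
		struct state o = { .niccaps = 0x3, .toecaps = 0x1 };
		struct state n = { .niccaps = 0x3, .toecaps = 0x0 };

		COMPARE_CAPS(&o, &n, nic);	/* silent: values match */
		COMPARE_CAPS(&o, &n, toe);	/* prints the mismatch */
		return (0);
	}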
@@ -2072,7 +2662,8 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
ifp->if_mtu = mtu;
if (vi->flags & VI_INIT_DONE) {
t4_update_fl_bufsize(ifp);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (!hw_off_limits(sc) &&
+ ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MTU);
}
end_synchronized_op(sc, 0);
@@ -2083,6 +2674,11 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto fail;
+ }
+
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
flags = vi->if_flags;
@@ -2106,7 +2702,7 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
if (rc)
return (rc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (!hw_off_limits(sc) && ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MCADDRS);
end_synchronized_op(sc, 0);
break;
@@ -2281,8 +2877,11 @@ fail:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
if (rc)
return (rc);
- rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
- i2c.offset, i2c.len, &i2c.data[0]);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
+ i2c.offset, i2c.len, &i2c.data[0]);
end_synchronized_op(sc, 0);
if (rc == 0)
rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
@@ -2827,7 +3426,7 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
- device_printf(vi->dev, "failed to allocate virtual interface %d"
+ CH_ERR(vi, "failed to allocate virtual interface %d"
"for port %d: %d\n", index, pi->port_id, -rc);
return (-rc);
}
@@ -2908,6 +3507,7 @@ vcxgbe_detach(device_t dev)
}
static struct callout fatal_callout;
+static struct taskqueue *reset_tq;
static void
delayed_panic(void *arg)
@@ -2938,9 +3538,11 @@ t4_fatal_err(struct adapter *sc, bool fw_error)
#endif
if (t4_panic_on_fatal_err) {
- log(LOG_ALERT, "%s: panic on fatal error after 30s",
- device_get_nameunit(sc->dev));
+ CH_ALERT(sc, "panicking on fatal error (after 30s).\n");
callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
+ } else if (t4_reset_on_fatal_err) {
+ CH_ALERT(sc, "resetting on fatal error.\n");
+ taskqueue_enqueue(reset_tq, &sc->reset_task);
}
}
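Note that t4_fatal_err() only enqueues the reset: it can be reached
from contexts that must not sleep, while the reset itself sleeps inside
synchronized ops, so the heavy work is deferred to the reset_tq task
thread.  A userspace model of that hand-off, with a pthread worker in
place of the taskqueue:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static bool reset_pending;

	static void *
	reset_worker(void *arg)
	{
		pthread_mutex_lock(&mtx);
		while (!reset_pending)
			pthread_cond_wait(&cv, &mtx);
		pthread_mutex_unlock(&mtx);
		printf("worker: performing the slow, sleepable reset\n");
		return (NULL);
	}

	/* Stands in for taskqueue_enqueue(): cheap in any context. */
	static void
	request_reset(void)
	{
		pthread_mutex_lock(&mtx);
		reset_pending = true;
		pthread_cond_signal(&cv);
		pthread_mutex_unlock(&mtx);
	}

	int
	main(void)
	{
		pthread_t tid;

		pthread_create(&tid, NULL, reset_worker, NULL);
		printf("fatal error path: queueing reset and returning\n");
		request_reset();
		pthread_join(tid, NULL);
		return (0);
	}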
@@ -6685,6 +7287,10 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
&sc->swintr, 0, "software triggered interrupts");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I",
+ "1 = reset adapter, 0 = zero reset counter");
+
/*
* dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
*/
@@ -7419,9 +8025,13 @@ sysctl_btphy(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
if (rc)
return (rc);
- /* XXX: magic numbers */
- rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
- &v);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ /* XXX: magic numbers */
+ rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e,
+ op ? 0x20 : 0xc820, &v);
+ }
end_synchronized_op(sc, 0);
if (rc)
return (rc);
@@ -7472,7 +8082,9 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
"t4txvm");
if (rc)
return (rc);
- if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
/*
* We don't want parse_pkt to run with one setting (VF or PF)
* and then eth_tx to see a different setting but still use
@@ -7688,13 +8300,15 @@ sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
"t4PAUSE");
if (rc)
return (rc);
- PORT_LOCK(pi);
- lc->requested_fc = n;
- fixup_link_config(pi);
- if (pi->up_vis > 0)
- rc = apply_link_config(pi);
- set_current_media(pi);
- PORT_UNLOCK(pi);
+ if (!hw_off_limits(sc)) {
+ PORT_LOCK(pi);
+ lc->requested_fc = n;
+ fixup_link_config(pi);
+ if (pi->up_vis > 0)
+ rc = apply_link_config(pi);
+ set_current_media(pi);
+ PORT_UNLOCK(pi);
+ }
end_synchronized_op(sc, 0);
}
@@ -7774,13 +8388,15 @@ sysctl_fec(SYSCTL_HANDLER_ARGS)
lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
FEC_MODULE);
}
- fixup_link_config(pi);
- if (pi->up_vis > 0) {
- rc = apply_link_config(pi);
- if (rc != 0) {
- lc->requested_fec = old;
- if (rc == FW_EPROTO)
- rc = ENOTSUP;
+ if (!hw_off_limits(sc)) {
+ fixup_link_config(pi);
+ if (pi->up_vis > 0) {
+ rc = apply_link_config(pi);
+ if (rc != 0) {
+ lc->requested_fec = old;
+ if (rc == FW_EPROTO)
+ rc = ENOTSUP;
+ }
}
}
done:
@@ -7810,8 +8426,14 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0)
- return (EBUSY);
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
+ rc = EBUSY;
+ goto done;
+ }
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
PORT_LOCK(pi);
if (pi->up_vis == 0) {
/*
@@ -7833,9 +8455,9 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
}
rc = sbuf_finish(sb);
- sbuf_delete(sb);
-
PORT_UNLOCK(pi);
+done:
+ sbuf_delete(sb);
end_synchronized_op(sc, 0);
return (rc);
@@ -7873,10 +8495,12 @@ sysctl_autoneg(SYSCTL_HANDLER_ARGS)
goto done;
}
lc->requested_aneg = val;
- fixup_link_config(pi);
- if (pi->up_vis > 0)
- rc = apply_link_config(pi);
- set_current_media(pi);
+ if (!hw_off_limits(sc)) {
+ fixup_link_config(pi);
+ if (pi->up_vis > 0)
+ rc = apply_link_config(pi);
+ set_current_media(pi);
+ }
done:
PORT_UNLOCK(pi);
end_synchronized_op(sc, 0);
@@ -7887,12 +8511,20 @@ static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int reg = arg2;
+ int rc, reg = arg2;
uint64_t val;
- val = t4_read_reg64(sc, reg);
-
- return (sysctl_handle_64(oidp, &val, 0, req));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = 0;
+ val = t4_read_reg64(sc, reg);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sysctl_handle_64(oidp, &val, 0, req);
+ return (rc);
}
static int
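The reworked sysctl_handle_t4_reg64() above also narrows the lock
scope: the register is read under reg_lock, but sysctl_handle_64()
(which may copy out to userspace) runs only after the lock is dropped.
A minimal model of that discipline, with printf standing in for the
slow consumer:

	#include <pthread.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint64_t fake_reg = 0x1122334455667788ULL;

	int
	main(void)
	{
		uint64_t val;

		pthread_mutex_lock(&reg_lock);
		val = fake_reg;		/* the only work done under the lock */
		pthread_mutex_unlock(&reg_lock);

		/* Anything slow (copyout, formatting) happens unlocked. */
		printf("reg = 0x%016" PRIx64 "\n", val);
		return (0);
	}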
@@ -7905,10 +8537,14 @@ sysctl_temperature(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
if (rc)
return (rc);
- param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
- V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
- rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ }
end_synchronized_op(sc, 0);
if (rc)
return (rc);
@@ -7932,10 +8568,15 @@ sysctl_vdd(SYSCTL_HANDLER_ARGS)
"t4vdd");
if (rc)
return (rc);
- param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
- V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
- rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
+ &param, &val);
+ }
end_synchronized_op(sc, 0);
if (rc)
return (rc);
@@ -7964,11 +8605,15 @@ sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
if (rc)
return (rc);
- param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
- V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
- val = 1;
- rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
+ val = 1;
+ rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ }
end_synchronized_op(sc, 0);
if (rc == 0)
sc->sensor_resets++;
@@ -7986,9 +8631,13 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
- param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
- rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+ }
end_synchronized_op(sc, 0);
if (rc)
return (rc);
@@ -8034,7 +8683,14 @@ sysctl_cctrl(SYSCTL_HANDLER_ARGS)
if (sb == NULL)
return (ENOMEM);
- t4_read_cong_tbl(sc, incr);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_read_cong_tbl(sc, incr);
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ goto done;
for (i = 0; i < NCCTRL_WIN; ++i) {
sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
@@ -8047,8 +8703,8 @@ sysctl_cctrl(SYSCTL_HANDLER_ARGS)
}
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
-
return (rc);
}
@@ -8076,14 +8732,24 @@ sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
qtype = "IBQ";
n = 4 * CIM_IBQ_SIZE;
buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- rc = t4_read_cim_ibq(sc, qid, buf, n);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_ibq(sc, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
} else {
/* outbound queue */
qtype = "OBQ";
qid -= CIM_NUM_IBQ;
n = 4 * cim_num_obq * CIM_OBQ_SIZE;
buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- rc = t4_read_cim_obq(sc, qid, buf, n);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_obq(sc, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
}
if (rc < 0) {
@@ -8182,25 +8848,27 @@ sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
uint32_t cfg, *buf;
int rc;
- rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
- if (rc != 0)
- return (rc);
-
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
M_ZERO | flags);
if (buf == NULL)
return (ENOMEM);
- rc = -t4_cim_read_la(sc, buf, NULL);
- if (rc != 0)
- goto done;
- if (chip_id(sc) < CHELSIO_T6)
- sbuf_cim_la4(sc, sb, buf, cfg);
- else
- sbuf_cim_la6(sc, sb, buf, cfg);
-
-done:
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ if (rc == 0)
+ rc = -t4_cim_read_la(sc, buf, NULL);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0) {
+ if (chip_id(sc) < CHELSIO_T6)
+ sbuf_cim_la4(sc, sb, buf, cfg);
+ else
+ sbuf_cim_la6(sc, sb, buf, cfg);
+ }
free(buf, M_CXGBE);
return (rc);
}
@@ -8266,9 +8934,16 @@ sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
- t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
- p = buf;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ goto done;
+ p = buf;
for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
p[1], p[0]);
@@ -8283,8 +8958,8 @@ sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
(p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
p[0] & 1);
}
-
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
@@ -8310,9 +8985,16 @@ sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
- t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
- p = buf;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ goto done;
+ p = buf;
sbuf_printf(sb, "Cntl ID DataBE Addr Data");
for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
@@ -8327,6 +9009,7 @@ sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
}
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
@@ -8355,14 +9038,22 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
}
nq = CIM_NUM_IBQ + cim_num_obq;
- rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
- if (rc == 0)
- rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
- if (rc != 0)
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
+ if (rc == 0) {
+ rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq,
+ obq_wr);
+ if (rc == 0)
+ t4_read_cimq_cfg(sc, base, size, thres);
+ }
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
return (rc);
- t4_read_cimq_cfg(sc, base, size, thres);
-
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
return (rc);
@@ -8408,8 +9099,13 @@ sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
- t4_tp_get_cpl_stats(sc, &stats, 0);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_cpl_stats(sc, &stats, 0);
mtx_unlock(&sc->reg_lock);
+ if (rc)
+ goto done;
if (sc->chip_params->nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
@@ -8427,8 +9123,8 @@ sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
}
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
-
return (rc);
}
@@ -8449,14 +9145,17 @@ sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
- t4_get_usm_stats(sc, &stats, 1);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_get_usm_stats(sc, &stats, 1);
mtx_unlock(&sc->reg_lock);
-
- sbuf_printf(sb, "Frames: %u\n", stats.frames);
- sbuf_printf(sb, "Octets: %ju\n", stats.octets);
- sbuf_printf(sb, "Drops: %u", stats.drops);
-
- rc = sbuf_finish(sb);
+ if (rc == 0) {
+ sbuf_printf(sb, "Frames: %u\n", stats.frames);
+ sbuf_printf(sb, "Octets: %ju\n", stats.octets);
+ sbuf_printf(sb, "Drops: %u", stats.drops);
+ rc = sbuf_finish(sb);
+ }
sbuf_delete(sb);
return (rc);
@@ -8479,15 +9178,18 @@ sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
- t4_tp_get_tid_stats(sc, &stats, 1);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_tid_stats(sc, &stats, 1);
mtx_unlock(&sc->reg_lock);
-
- sbuf_printf(sb, "Delete: %u\n", stats.del);
- sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
- sbuf_printf(sb, "Active: %u\n", stats.act);
- sbuf_printf(sb, "Passive: %u", stats.pas);
-
- rc = sbuf_finish(sb);
+ if (rc == 0) {
+ sbuf_printf(sb, "Delete: %u\n", stats.del);
+ sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
+ sbuf_printf(sb, "Active: %u\n", stats.act);
+ sbuf_printf(sb, "Passive: %u", stats.pas);
+ rc = sbuf_finish(sb);
+ }
sbuf_delete(sb);
return (rc);
@@ -8546,7 +9248,13 @@ sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
if (buf == NULL)
return (ENOMEM);
- rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
+ dparams->size);
+ mtx_unlock(&sc->reg_lock);
if (rc != 0)
goto done;
@@ -8651,15 +9359,21 @@ sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ for (i = 0; i < nchan; i++)
+ t4_get_fcoe_stats(sc, i, &stats[i], 1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- mtx_lock(&sc->reg_lock);
- for (i = 0; i < nchan; i++)
- t4_get_fcoe_stats(sc, i, &stats[i], 1);
- mtx_unlock(&sc->reg_lock);
-
if (nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
" channel 2 channel 3");
@@ -8701,10 +9415,16 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
- sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
+ sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
if (sb == NULL)
return (ENOMEM);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
t4_read_pace_tbl(sc, pace_tab);
@@ -8731,10 +9451,10 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
else
sbuf_printf(sb, " disabled");
}
-
rc = sbuf_finish(sb);
+done:
+ mtx_unlock(&sc->reg_lock);
sbuf_delete(sb);
-
return (rc);
}
@@ -8767,8 +9487,16 @@ sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
memset(s, 0, sizeof(s));
for (i = 0; i < sc->chip_params->nchan; i += 2) {
- t4_get_lb_stats(sc, i, &s[0]);
- t4_get_lb_stats(sc, i + 1, &s[1]);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_get_lb_stats(sc, i, &s[0]);
+ t4_get_lb_stats(sc, i + 1, &s[1]);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
p0 = &s[0].octets;
p1 = &s[1].octets;
@@ -8876,6 +9604,12 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
mem[i].idx = i;
}
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
/* Find and sort the populated memory ranges */
i = 0;
lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
@@ -8910,7 +9644,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
i++;
}
if (!i) /* no memory available */
- return 0;
+ goto done;
qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
@@ -9085,10 +9819,11 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
"\nLoopback %d using %u pages out of %u allocated",
i, used, alloc);
}
-
- rc = sbuf_finish(sb);
+done:
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sbuf_finish(sb);
sbuf_delete(sb);
-
return (rc);
}
@@ -9125,13 +9860,29 @@ sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
uint32_t cls_lo, cls_hi;
uint8_t addr[ETHER_ADDR_LEN];
- tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
- tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
+ tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
if (tcamx & tcamy)
continue;
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
- cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
" %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5], (uintmax_t)mask,
@@ -9156,14 +9907,15 @@ sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
"t4mps");
if (rc)
break;
- rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
- sizeof(ldst_cmd), &ldst_cmd);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
end_synchronized_op(sc, 0);
-
- if (rc != 0) {
- sbuf_printf(sb, "%36d", rc);
- rc = 0;
- } else {
+ if (rc != 0)
+ break;
+ else {
sbuf_printf(sb, " %08x %08x %08x %08x",
be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
@@ -9221,11 +9973,20 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
else
ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
- t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
- val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
- tcamy = G_DMACH(val) << 32;
- tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
- data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
lookup_type = G_DATALKPTYPE(data2);
port_num = G_DATAPORTNUM(data2);
if (lookup_type && lookup_type != M_DATALKPTYPE) {
@@ -9242,11 +10003,20 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
}
ctl |= V_CTLXYBITSEL(1);
- t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
- val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
- tcamx = G_DMACH(val) << 32;
- tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
- data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
if (lookup_type && lookup_type != M_DATALKPTYPE) {
/* Inner header VNI mask */
vnix = ((data2 & F_DATAVIDH2) << 23) |
@@ -9258,8 +10028,16 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
continue;
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
- cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
if (lookup_type && lookup_type != M_DATALKPTYPE) {
sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
@@ -9306,14 +10084,15 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
"t6mps");
if (rc)
break;
- rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
- sizeof(ldst_cmd), &ldst_cmd);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
end_synchronized_op(sc, 0);
-
- if (rc != 0) {
- sbuf_printf(sb, "%72d", rc);
- rc = 0;
- } else {
+ if (rc != 0)
+ break;
+ else {
sbuf_printf(sb, " %08x %08x %08x %08x"
" %08x %08x %08x %08x",
be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
@@ -9355,12 +10134,19 @@ sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_read_mtu_tbl(sc, mtus, NULL);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- t4_read_mtu_tbl(sc, mtus, NULL);
-
sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
@@ -9393,13 +10179,21 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
+ t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
- t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
-
sbuf_printf(sb, " Tx pcmds Tx bytes");
for (i = 0; i < 4; i++) {
sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
@@ -9449,14 +10243,19 @@ sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_rdma_stats(sc, &stats, 0);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- mtx_lock(&sc->reg_lock);
- t4_tp_get_rdma_stats(sc, &stats, 0);
- mtx_unlock(&sc->reg_lock);
-
sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
@@ -9478,14 +10277,19 @@ sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- mtx_lock(&sc->reg_lock);
- t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
- mtx_unlock(&sc->reg_lock);
-
sbuf_printf(sb,
" IP IPv6\n");
sbuf_printf(sb, "OutRsts: %20u %20u\n",
@@ -9509,6 +10313,7 @@ sysctl_tids(SYSCTL_HANDLER_ARGS)
struct adapter *sc = arg1;
struct sbuf *sb;
int rc;
+ uint32_t x, y;
struct tid_info *t = &sc->tids;
rc = sysctl_wire_old_buffer(req, 0);
@@ -9530,21 +10335,30 @@ sysctl_tids(SYSCTL_HANDLER_ARGS)
}
if (t->ntids) {
- sbuf_printf(sb, "TID range: ");
- if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
- uint32_t b, hb;
+ bool hashen = false;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
+ hashen = true;
if (chip_id(sc) <= CHELSIO_T5) {
- b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
- hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
+ x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
+ y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
} else {
- b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
- hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
+ x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
+ y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
}
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ goto done;
- if (b)
- sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
- sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
+ sbuf_printf(sb, "TID range: ");
+ if (hashen) {
+ if (x)
+ sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1);
+ sbuf_printf(sb, "%u-%u", y, t->ntids - 1);
} else {
sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
t->ntids - 1);
@@ -9568,11 +10382,22 @@ sysctl_tids(SYSCTL_HANDLER_ARGS)
t->etid_base + t->netids - 1, t->etids_in_use);
}
- sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
- t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
- t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
-
- rc = sbuf_finish(sb);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4);
+ y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ goto done;
+ sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y);
+done:
+ if (rc == 0)
+ rc = sbuf_finish(sb);
+ else
+ (void)sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
@@ -9590,14 +10415,19 @@ sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_err_stats(sc, &stats, 0);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- mtx_lock(&sc->reg_lock);
- t4_tp_get_err_stats(sc, &stats, 0);
- mtx_unlock(&sc->reg_lock);
-
if (sc->chip_params->nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
" channel 2 channel 3\n");
@@ -9666,14 +10496,19 @@ sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return(rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_tp_get_tnl_stats(sc, &stats, 1);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- mtx_lock(&sc->reg_lock);
- t4_tp_get_tnl_stats(sc, &stats, 1);
- mtx_unlock(&sc->reg_lock);
-
if (sc->chip_params->nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
" channel 2 channel 3\n");
@@ -9711,10 +10546,17 @@ sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
return (rc);
if (mask > 0xffff)
return (EINVAL);
- tpp->la_mask = mask << 16;
- t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ tpp->la_mask = mask << 16;
+ t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U,
+ tpp->la_mask);
+ }
+ mtx_unlock(&sc->reg_lock);
- return (0);
+ return (rc);
}
struct field_desc {
@@ -9927,27 +10769,34 @@ sysctl_tp_la(SYSCTL_HANDLER_ARGS)
buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
- t4_tp_read_la(sc, buf, NULL);
- p = buf;
-
- switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
- case 2:
- inc = 2;
- show_func = tp_la_show2;
- break;
- case 3:
- inc = 2;
- show_func = tp_la_show3;
- break;
- default:
- inc = 1;
- show_func = tp_la_show;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_tp_read_la(sc, buf, NULL);
+ switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
+ case 2:
+ inc = 2;
+ show_func = tp_la_show2;
+ break;
+ case 3:
+ inc = 2;
+ show_func = tp_la_show3;
+ break;
+ default:
+ inc = 1;
+ show_func = tp_la_show;
+ }
}
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ goto done;
+ p = buf;
for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
(*show_func)(sb, p, i);
-
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
@@ -9965,12 +10814,19 @@ sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_get_chan_txrate(sc, nrate, orate);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
- t4_get_chan_txrate(sc, nrate, orate);
-
if (sc->chip_params->nchan > 2) {
sbuf_printf(sb, " channel 0 channel 1"
" channel 2 channel 3\n");
@@ -10011,17 +10867,24 @@ sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
- t4_ulprx_read_la(sc, buf);
- p = buf;
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ t4_ulprx_read_la(sc, buf);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ goto done;
+ p = buf;
sbuf_printf(sb, " Pcmd Type Message"
" Data");
for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
}
-
rc = sbuf_finish(sb);
+done:
sbuf_delete(sb);
free(buf, M_CXGBE);
return (rc);
@@ -10032,7 +10895,8 @@ sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, v;
+ int rc;
+ uint32_t cfg, s1, s2;
MPASS(chip_id(sc) >= CHELSIO_T5);
@@ -10040,26 +10904,32 @@ sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ cfg = t4_read_reg(sc, A_SGE_STAT_CFG);
+ s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL);
+ s2 = t4_read_reg(sc, A_SGE_STAT_MATCH);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
- v = t4_read_reg(sc, A_SGE_STAT_CFG);
- if (G_STATSOURCE_T5(v) == 7) {
+ if (G_STATSOURCE_T5(cfg) == 7) {
int mode;
- mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
- if (mode == 0) {
- sbuf_printf(sb, "total %d, incomplete %d",
- t4_read_reg(sc, A_SGE_STAT_TOTAL),
- t4_read_reg(sc, A_SGE_STAT_MATCH));
- } else if (mode == 1) {
- sbuf_printf(sb, "total %d, data overflow %d",
- t4_read_reg(sc, A_SGE_STAT_TOTAL),
- t4_read_reg(sc, A_SGE_STAT_MATCH));
- } else {
+ mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg);
+ if (mode == 0)
+ sbuf_printf(sb, "total %d, incomplete %d", s1, s2);
+ else if (mode == 1)
+ sbuf_printf(sb, "total %d, data overflow %d", s1, s2);
+ else
sbuf_printf(sb, "unknown mode %d", mode);
- }
}
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -10099,6 +10969,34 @@ sysctl_cpus(SYSCTL_HANDLER_ARGS)
return (rc);
}
+static int
+sysctl_reset(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ u_int val;
+ int rc;
+
+ val = sc->num_resets;
+ rc = sysctl_handle_int(oidp, &val, 0, req);
+ if (rc != 0 || req->newptr == NULL)
+ return (rc);
+
+ if (val == 0) {
+		/* Zero out the counter that tracks resets. */
+ sc->num_resets = 0;
+ return (0);
+ }
+
+ if (val != 1)
+ return (EINVAL); /* 0 or 1 are the only legal values */
+
+ if (hw_off_limits(sc)) /* harmless race */
+ return (EALREADY);
+
+ taskqueue_enqueue(reset_tq, &sc->reset_task);
+ return (0);
+}
+
#ifdef TCP_OFFLOAD
static int
sysctl_tls(SYSCTL_HANDLER_ARGS)
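sysctl_reset above is deliberately thin: reads report num_resets, writing 0 clears the counter, and writing 1 only enqueues sc->reset_task. The reset itself cannot run in the sysctl caller's context because it sleeps while tearing down and rebuilding queues, so the work is punted to the dedicated reset_tq thread. The task handler sits outside these hunks; a hypothetical sketch of its shape, with suspend_adapter()/resume_adapter() standing in for whatever the driver's entry points are actually called:

static void
reset_adapter_task(void *arg, int pending)
{
	struct adapter *sc = arg;

	/* Suspend marks the hardware off limits... */
	if (suspend_adapter(sc) == 0)
		(void)resume_adapter(sc);	/* ...resume brings it back. */
	sc->num_resets++;	/* illustration only */
}

Attach would wire this up once with TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc).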
@@ -10118,16 +11016,20 @@ sysctl_tls(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
if (rc)
return (rc);
- sc->tt.tls = !!v;
- for_each_port(sc, i) {
- for_each_vi(sc->port[i], j, vi) {
- if (vi->flags & VI_INIT_DONE)
- t4_update_fl_bufsize(vi->ifp);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ sc->tt.tls = !!v;
+ for_each_port(sc, i) {
+ for_each_vi(sc->port[i], j, vi) {
+ if (vi->flags & VI_INIT_DONE)
+ t4_update_fl_bufsize(vi->ifp);
+ }
}
}
end_synchronized_op(sc, 0);
- return (0);
+ return (rc);
}
@@ -10146,6 +11048,11 @@ sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (sc->tt.num_tls_rx_ports == 0) {
i = -1;
rc = SYSCTL_OUT(req, &i, sizeof(i));
@@ -10189,6 +11096,7 @@ sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
err:
free(new_ports, M_CXGBE);
}
+done:
end_synchronized_op(sc, 0);
return (rc);
}
@@ -10238,7 +11146,15 @@ sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
u_int res, re;
u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
- res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ res = (u_int)-1;
+ else
+ res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
+ mtx_unlock(&sc->reg_lock);
+ if (res == (u_int)-1)
+ return (ENXIO);
+
switch (arg2) {
case 0:
/* timer_tick */
@@ -10265,12 +11181,24 @@ static int
sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- u_int res, dack_re, v;
+ int rc;
+ u_int dack_tmr, dack_re, v;
u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
- res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
- dack_re = G_DELAYEDACKRESOLUTION(res);
- v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = 0;
+ dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc,
+ A_TP_TIMER_RESOLUTION));
+ dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+
+ v = ((cclk_ps << dack_re) / 1000000) * dack_tmr;
return (sysctl_handle_int(oidp, &v, 0, req));
}
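The arithmetic in sysctl_tp_dack_timer: vpd.cclk is the core clock in kHz, so cclk_ps is the clock period in picoseconds; left-shifting by the delayed-ACK resolution gives the tick period in ps, dividing by 10^6 converts that to microseconds, and multiplying by the TP_DACK_TIMER count yields the timer value in microseconds. Worked through with assumed values:

/* Assume a 200 MHz core clock (vpd.cclk == 200000) and dack_re == 10. */
u_int cclk_ps = 1000000000 / 200000;	    /* 5000 ps per core cycle */
u_int tick_us = (cclk_ps << 10) / 1000000;  /* 5 us per DACK tick */
u_int dack_us = tick_us * 40;		    /* TP_DACK_TIMER of 40 -> 200 us */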
@@ -10279,7 +11207,7 @@ static int
sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int reg = arg2;
+ int rc, reg = arg2;
u_int tre;
u_long tp_tick_us, v;
u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
@@ -10289,15 +11217,23 @@ sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
- tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
- tp_tick_us = (cclk_ps << tre) / 1000000;
-
- if (reg == A_TP_INIT_SRTT)
- v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = 0;
+ tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
+ tp_tick_us = (cclk_ps << tre) / 1000000;
+ if (reg == A_TP_INIT_SRTT)
+ v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
+ else
+ v = tp_tick_us * t4_read_reg(sc, reg);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
else
- v = tp_tick_us * t4_read_reg(sc, reg);
-
- return (sysctl_handle_long(oidp, &v, 0, req));
+ return (sysctl_handle_long(oidp, &v, 0, req));
}
/*
@@ -10308,30 +11244,48 @@ static int
sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int idx = arg2;
+ int rc, idx = arg2;
u_int v;
MPASS(idx >= 0 && idx <= 24);
- v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
-
- return (sysctl_handle_int(oidp, &v, 0, req));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = 0;
+ v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+ else
+ return (sysctl_handle_int(oidp, &v, 0, req));
}
static int
sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int idx = arg2;
+ int rc, idx = arg2;
u_int shift, v, r;
MPASS(idx >= 0 && idx < 16);
r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
shift = (idx & 3) << 3;
- v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
-
- return (sysctl_handle_int(oidp, &v, 0, req));
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = 0;
+ v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ return (rc);
+ else
+ return (sysctl_handle_int(oidp, &v, 0, req));
}
static int
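sysctl_tp_backoff decodes a packed register file: the 16 TCP backoff indices live four to a 32-bit register, one byte each, so idx & ~3 is the byte offset of the containing register and (idx & 3) << 3 is the bit offset within it. For example, with idx == 6:

r = A_TP_TCP_BACKOFF_REG0 + (6 & ~3);	/* REG0 + 4: the second register */
shift = (6 & 3) << 3;			/* 16: the third byte, bits 23:16 */
v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;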
@@ -10418,6 +11372,11 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (sc->flags & FW_OK) {
rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
&cntxt->data[0]);
@@ -10445,6 +11404,11 @@ load_fw(struct adapter *sc, struct t4_data *fw)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
/*
* The firmware, with the sole exception of the memory parity error
* handler, runs from memory and not flash. It is almost always safe to
@@ -10479,6 +11443,11 @@ load_cfg(struct adapter *sc, struct t4_data *cfg)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (cfg->len == 0) {
/* clear */
rc = -t4_load_cfg(sc, NULL, 0);
@@ -10524,6 +11493,11 @@ load_boot(struct adapter *sc, struct t4_bootrom *br)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (br->len == 0) {
/* clear */
rc = -t4_load_boot(sc, NULL, offset, 0);
@@ -10552,6 +11526,11 @@ load_bootcfg(struct adapter *sc, struct t4_data *bc)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
if (bc->len == 0) {
/* clear */
rc = -t4_load_bootcfg(sc, NULL, 0);
@@ -10729,7 +11708,12 @@ read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
int rc;
uint8_t *dst;
- rc = validate_mem_range(sc, mr->addr, mr->len);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = validate_mem_range(sc, mr->addr, mr->len);
+ mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
@@ -10740,7 +11724,14 @@ read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
while (remaining) {
n = min(remaining, MAX_READ_BUF_SIZE);
- read_via_memwin(sc, 2, addr, buf, n);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ read_via_memwin(sc, 2, addr, buf, n);
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
rc = copyout(buf, dst, n);
if (rc != 0)
@@ -10770,8 +11761,11 @@ read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
if (rc)
return (rc);
- rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
- i2cd->offset, i2cd->len, &i2cd->data[0]);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
+ i2cd->offset, i2cd->len, &i2cd->data[0]);
end_synchronized_op(sc, 0);
return (rc);
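Paths that go through the firmware mailbox (get_sge_context, the load_* routines, read_i2c) can sleep, so they hold a synchronized_op rather than reg_lock and check hw_off_limits() once the op is established. A minimal sketch, with t4_fw_example() as a hypothetical mailbox call and "t4exmp" a made-up wait-channel string:

static int
fw_operation(struct adapter *sc)
{
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4exmp");
	if (rc)
		return (rc);
	if (hw_off_limits(sc))
		rc = ENXIO;	/* suspended: leave the mailbox alone */
	else
		rc = -t4_fw_example(sc, sc->mbox);	/* hypothetical */
	end_synchronized_op(sc, 0);
	return (rc);
}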
@@ -10799,30 +11793,32 @@ clear_stats(struct adapter *sc, u_int port_id)
if (pi == NULL)
return (EIO);
- /* MAC stats */
- t4_clr_port_stats(sc, pi->tx_chan);
- if (is_t6(sc)) {
- if (pi->fcs_reg != -1)
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
- else
- pi->stats.rx_fcs_err = 0;
- }
- pi->tx_parse_error = 0;
- pi->tnl_cong_drops = 0;
mtx_lock(&sc->reg_lock);
- for_each_vi(pi, v, vi) {
- if (vi->flags & VI_INIT_DONE)
- t4_clr_vi_stats(sc, vi->vin);
- }
- chan_map = pi->rx_e_chan_map;
- v = 0; /* reuse */
- while (chan_map) {
- i = ffs(chan_map) - 1;
- t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
- 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
- chan_map &= ~(1 << i);
+ if (!hw_off_limits(sc)) {
+ /* MAC stats */
+ t4_clr_port_stats(sc, pi->tx_chan);
+ if (is_t6(sc)) {
+ if (pi->fcs_reg != -1)
+ pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ else
+ pi->stats.rx_fcs_err = 0;
+ }
+ for_each_vi(pi, v, vi) {
+ if (vi->flags & VI_INIT_DONE)
+ t4_clr_vi_stats(sc, vi->vin);
+ }
+ chan_map = pi->rx_e_chan_map;
+ v = 0; /* reuse */
+ while (chan_map) {
+ i = ffs(chan_map) - 1;
+ t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
+ 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
+ chan_map &= ~(1 << i);
+ }
}
mtx_unlock(&sc->reg_lock);
+ pi->tx_parse_error = 0;
+ pi->tnl_cong_drops = 0;
/*
* Since this command accepts a port, clear stats for
@@ -11060,12 +12056,16 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
return (EFAULT);
- if (edata->size == 4)
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else if (edata->size == 4)
edata->val = t4_read_reg(sc, edata->addr);
else if (edata->size == 8)
edata->val = t4_read_reg64(sc, edata->addr);
else
- return (EINVAL);
+ rc = EINVAL;
+ mtx_unlock(&sc->reg_lock);
break;
}
@@ -11075,14 +12075,19 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
return (EFAULT);
- if (edata->size == 4) {
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else if (edata->size == 4) {
-		if (edata->val & 0xffffffff00000000)
-			return (EINVAL);
-		t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
+		if (edata->val & 0xffffffff00000000)
+			rc = EINVAL;
+		else
+			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
} else if (edata->size == 8)
t4_write_reg64(sc, edata->addr, edata->val);
else
- return (EINVAL);
+ rc = EINVAL;
+ mtx_unlock(&sc->reg_lock);
+
break;
}
case CHELSIO_T4_REGDUMP: {
@@ -11097,8 +12102,14 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
regs->len = reglen;
buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
- get_regs(sc, regs, buf);
- rc = copyout(buf, regs->data, reglen);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ get_regs(sc, regs, buf);
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = copyout(buf, regs->data, reglen);
free(buf, M_CXGBE);
break;
}
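Note the ordering in REGDUMP and read_card_mem: the hardware is read into a kernel staging buffer while reg_lock is held, the lock is dropped, and only then does copyout() run, since copyout can fault and sleep and a mutex cannot be held across it. The pattern, with read_hw_into() as a hypothetical staging read:

buf = malloc(len, M_CXGBE, M_WAITOK | M_ZERO);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
	rc = ENXIO;
else
	read_hw_into(sc, buf, len);	/* hypothetical */
mtx_unlock(&sc->reg_lock);
if (rc == 0)
	rc = copyout(buf, udata, len);	/* may sleep: lock already dropped */
free(buf, M_CXGBE);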
@@ -11181,6 +12192,8 @@ toe_capability(struct vi_info *vi, bool enable)
if (!is_offload(sc))
return (ENODEV);
+ if (hw_off_limits(sc))
+ return (ENXIO);
if (enable) {
#ifdef KERN_TLS
@@ -11427,6 +12440,8 @@ ktls_capability(struct adapter *sc, bool enable)
if (!is_ktls(sc))
return (ENODEV);
+ if (hw_off_limits(sc))
+ return (ENXIO);
if (enable) {
if (sc->flags & KERN_TLS_ON)
@@ -11809,7 +12824,8 @@ t4_vxlan_start(struct adapter *sc, void *arg)
if (sc->vxlan_refcount == 0) {
sc->vxlan_port = v->port;
sc->vxlan_refcount = 1;
- enable_vxlan_rx(sc);
+ if (!hw_off_limits(sc))
+ enable_vxlan_rx(sc);
} else if (sc->vxlan_port == v->port) {
sc->vxlan_refcount++;
} else {
@@ -11838,13 +12854,10 @@ t4_vxlan_stop(struct adapter *sc, void *arg)
if (sc->vxlan_port != v->port)
goto done;
if (sc->vxlan_refcount == 0) {
- log(LOG_ERR,
- "%s: VXLAN operation on port %d was stopped earlier; "
- "ignoring attempt to stop it again.\n",
- device_get_nameunit(sc->dev), sc->vxlan_port);
- } else if (--sc->vxlan_refcount == 0) {
+ CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; "
+ "ignoring attempt to stop it again.\n", sc->vxlan_port);
+ } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc))
t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0);
- }
done:
end_synchronized_op(sc, 0);
}
@@ -11927,6 +12940,10 @@ mod_event(module_t mod, int cmd, void *arg)
EVENTHANDLER_REGISTER(vxlan_stop,
t4_vxlan_stop_handler, NULL,
EVENTHANDLER_PRI_ANY);
+ reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
+ taskqueue_thread_enqueue, &reset_tq);
+ taskqueue_start_threads(&reset_tq, 1, PI_SOFT,
+ "t4_rst_thr");
}
sx_xunlock(&mlu);
break;
@@ -11936,6 +12953,7 @@ mod_event(module_t mod, int cmd, void *arg)
if (--loaded == 0) {
int tries;
+ taskqueue_free(reset_tq);
sx_slock(&t4_list_lock);
if (!SLIST_EMPTY(&t4_list)) {
rc = EBUSY;
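The reset taskqueue created in mod_event uses the standard taskqueue(9) self-referencing idiom: taskqueue_thread_enqueue takes the address of the queue pointer as its enqueue context, which is why &reset_tq is passed to taskqueue_create() before reset_tq itself is fully initialized. Isolated into an illustrative helper (mod_event does this inline):

static struct taskqueue *reset_tq;

static void
reset_tq_init(void)
{
	reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &reset_tq);
	/* One kernel thread at soft-interrupt priority drains the queue. */
	taskqueue_start_threads(&reset_tq, 1, PI_SOFT, "t4_rst_thr");
}

taskqueue_free(reset_tq) at unload drains and destroys it.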
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index 0cb44e1d4ffb..b320ff4a7c68 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -63,7 +63,10 @@ set_sched_class_config(struct adapter *sc, int minmax)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
if (rc)
return (rc);
- rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
end_synchronized_op(sc, 0);
return (rc);
@@ -209,9 +212,11 @@ set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
}
return (rc);
}
- rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
- fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
- p->weight, p->pktsize, 0, sleep_ok);
+ if (!hw_off_limits(sc)) {
+ rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level,
+ fw_mode, fw_rateunit, fw_ratemode, p->channel, p->cl,
+ p->minrate, p->maxrate, p->weight, p->pktsize, 0, sleep_ok);
+ }
end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index 035c7b029d8f..f6c85b15c75a 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -289,6 +289,11 @@ t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
for (i = t->idx; i < NTRACE; i++) {
if (isset(&sc->tracer_valid, t->idx)) {
t4_get_trace_filter(sc, &tp, i, &enabled);
@@ -338,6 +343,11 @@ t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
if (rc)
return (rc);
+ if (hw_off_limits(sc)) {
+ rc = ENXIO;
+ goto done;
+ }
+
/*
* If no tracing filter is specified this time then check if the filter
* at the index is valid anyway because it was set previously. If so