about summary refs log tree commit diff
path: root/sys/dev/gem/if_gem.c
diff options
context:
space:
mode:
author	Marius Strobl <marius@FreeBSD.org>	2009-06-23 20:36:59 +0000
committer	Marius Strobl <marius@FreeBSD.org>	2009-06-23 20:36:59 +0000
commit	9ba2b298dfda78b6669281507b314a4c441cd706 (patch)
tree	f992451f2d24f7251db039adf592e8c394536d48 /sys/dev/gem/if_gem.c
parent	b97457e2e6d5d16a0fef1789509f3f246f6a3b98 (diff)
downloadsrc-9ba2b298dfda78b6669281507b314a4c441cd706.tar.gz
src-9ba2b298dfda78b6669281507b314a4c441cd706.zip
- Initialize the ifnet structure, especially if_dname, before probing
  the PHYs as some PHY drivers use it (but probably shouldn't). How gem(4)
  has worked with brgphy(4) on powerpc without this so far is unclear to me.
- Introduce a dying flag which is set during detach and checked in
  gem_ioctl() in order to prevent active BPF listeners from clearing
  promiscuous mode which may lead to the tick callout being restarted
  which will trigger a panic once it's actually gone.
- In gem_stop() reset rather than just disable the transmitter and
  receiver in order to ensure we're not unloading DMA maps still in use
  by the hardware. [1]
- The blanking time is specified in PCI clocks so we should use twice
  the value when operating at 66MHz.
- Spell some 2 as ETHER_ALIGN and a 19 as GEM_STATUS_TX_COMPLETION_SHFT
  to make the actual intentions clear.
- As we don't unload the peak attempts counter ignore its overflow
  interrupts.
- Remove a stale setting of a variable to GEM_TD_INTERRUPT_ME which
  isn't used afterwards.
- For optimum performance increment the TX kick register in multiples
  of 4 if possible as suggested by the documentation.
- Partially revert r164931; drivers should only clear the watchdog
  timer if all outstanding TX descriptors are done.
- Fix some debugging strings.
- Add a missing BUS_DMASYNC_POSTWRITE in gem_rint().
- As the error paths in the interrupt handler are generally unlikely
  predict them as false.
- Add support for the SBus version of the GEM controller. [2]
- Add some lock assertions.
- Improve some comments.
- Fix some more or less cosmetic issues in the code of the PCI
  front-end.
- Change some softc members to be unsigned where more appropriate and
  remove unused ones.

Approved by:	re (kib)
Obtained from:	NetBSD (partially) [2], OpenBSD [1]
MFC after:	2 weeks
Notes
Notes: svn path=/head/; revision=194763
Diffstat (limited to 'sys/dev/gem/if_gem.c')
-rw-r--r--	sys/dev/gem/if_gem.c	237
1 file changed, 148 insertions(+), 89 deletions(-)
diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c
index da8c1e73bffb..6361519e217a 100644
--- a/sys/dev/gem/if_gem.c
+++ b/sys/dev/gem/if_gem.c
@@ -84,7 +84,7 @@ __FBSDID("$FreeBSD$");
CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
-#define TRIES 10000
+#define GEM_TRIES 10000
/*
* The hardware supports basic TCP/UDP checksum offloading. However,
@@ -119,7 +119,7 @@ static void gem_rint(struct gem_softc *sc);
#ifdef GEM_RINT_TIMEOUT
static void gem_rint_timeout(void *arg);
#endif
-static __inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
+static inline void gem_rxcksum(struct mbuf *m, uint64_t flags);
static void gem_rxdrain(struct gem_softc *sc);
static void gem_setladrf(struct gem_softc *sc);
static void gem_start(struct ifnet *ifp);
@@ -127,6 +127,7 @@ static void gem_start_locked(struct ifnet *ifp);
static void gem_stop(struct ifnet *ifp, int disable);
static void gem_tick(void *arg);
static void gem_tint(struct gem_softc *sc);
+static inline void gem_txkick(struct gem_softc *sc);
static int gem_watchdog(struct gem_softc *sc);
devclass_t gem_devclass;
@@ -151,9 +152,24 @@ gem_attach(struct gem_softc *sc)
int error, i;
uint32_t v;
+ if (bootverbose)
+ device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags);
+
+ /* Set up ifnet structure. */
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL)
return (ENOSPC);
+ sc->sc_csum_features = GEM_CSUM_FEATURES;
+ ifp->if_softc = sc;
+ if_initname(ifp, device_get_name(sc->sc_dev),
+ device_get_unit(sc->sc_dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_start = gem_start;
+ ifp->if_ioctl = gem_ioctl;
+ ifp->if_init = gem_init;
+ IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
+ ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
+ IFQ_SET_READY(&ifp->if_snd);
callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
@@ -161,27 +177,26 @@ gem_attach(struct gem_softc *sc)
#endif
/* Make sure the chip is stopped. */
- ifp->if_softc = sc;
gem_reset(sc);
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
NULL, &sc->sc_pdmatag);
- if (error)
+ if (error != 0)
goto fail_ifnet;
error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
- if (error)
+ if (error != 0)
goto fail_ptag;
error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
- if (error)
+ if (error != 0)
goto fail_rtag;
error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
@@ -189,7 +204,7 @@ gem_attach(struct gem_softc *sc)
sizeof(struct gem_control_data), 1,
sizeof(struct gem_control_data), 0,
NULL, NULL, &sc->sc_cdmatag);
- if (error)
+ if (error != 0)
goto fail_ttag;
/*
@@ -199,7 +214,7 @@ gem_attach(struct gem_softc *sc)
if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
(void **)&sc->sc_control_data,
BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
- &sc->sc_cddmamap))) {
+ &sc->sc_cddmamap)) != 0) {
device_printf(sc->sc_dev,
"unable to allocate control data, error = %d\n", error);
goto fail_ctag;
@@ -338,19 +353,6 @@ gem_attach(struct gem_softc *sc)
device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
sc->sc_rxfifosize / 1024, v / 16);
- sc->sc_csum_features = GEM_CSUM_FEATURES;
- /* Initialize ifnet structure. */
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(sc->sc_dev),
- device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_start = gem_start;
- ifp->if_ioctl = gem_ioctl;
- ifp->if_init = gem_init;
- IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
- ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
- IFQ_SET_READY(&ifp->if_snd);
-
/* Attach the interface. */
ether_ifattach(ifp, sc->sc_enaddr);
@@ -402,6 +404,7 @@ gem_detach(struct gem_softc *sc)
int i;
GEM_LOCK(sc);
+ sc->sc_flags |= GEM_DYING;
gem_stop(ifp, 1);
GEM_UNLOCK(sc);
callout_drain(&sc->sc_tick_ch);
@@ -456,7 +459,7 @@ gem_resume(struct gem_softc *sc)
GEM_UNLOCK(sc);
}
-static __inline void
+static inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
struct ether_header *eh;
@@ -535,12 +538,11 @@ static void
gem_tick(void *arg)
{
struct gem_softc *sc = arg;
- struct ifnet *ifp;
+ struct ifnet *ifp = sc->sc_ifp;
uint32_t v;
GEM_LOCK_ASSERT(sc, MA_OWNED);
- ifp = sc->sc_ifp;
/*
* Unload collision and error counters.
*/
@@ -584,7 +586,7 @@ gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
int i;
uint32_t reg;
- for (i = TRIES; i--; DELAY(100)) {
+ for (i = GEM_TRIES; i--; DELAY(100)) {
reg = GEM_BANKN_READ_M(bank, 4, sc, r);
if ((reg & clr) == 0 && (reg & set) == set)
return (1);
@@ -593,8 +595,7 @@ gem_bitwait(struct gem_softc *sc, u_int bank, bus_addr_t r, uint32_t clr,
}
static void
-gem_reset(sc)
- struct gem_softc *sc;
+gem_reset(struct gem_softc *sc)
{
#ifdef GEM_DEBUG
@@ -644,9 +645,8 @@ gem_stop(struct ifnet *ifp, int disable)
callout_stop(&sc->sc_rx_ch);
#endif
- /* XXX should we reset these instead? */
- gem_disable_tx(sc);
- gem_disable_rx(sc);
+ gem_reset_tx(sc);
+ gem_reset_rx(sc);
/*
* Release any queued transmit buffers.
@@ -721,7 +721,7 @@ gem_reset_rxdma(struct gem_softc *sc)
if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
GEM_UPDATE_RXDESC(sc, i);
sc->sc_rxptr = 0;
- GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* NOTE: we use only 32-bit DMA addresses here. */
GEM_BANK1_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0);
@@ -732,9 +732,11 @@ gem_reset_rxdma(struct gem_softc *sc)
((ETHER_HDR_LEN + sizeof(struct ip)) <<
GEM_RX_CONFIG_CXM_START_SHFT) |
(GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
- (2 << GEM_RX_CONFIG_FBOFF_SHFT));
+ (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT));
+ /* Adjust for the SBus clock probably isn't worth the fuzz. */
GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
- (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
+ ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
+ GEM_RX_BLANKING_TIME_SHIFT) | 6);
GEM_BANK1_WRITE_4(sc, GEM_RX_PAUSE_THRESH,
(3 * sc->sc_rxfifosize / 256) |
((sc->sc_rxfifosize / 256) << 12));
@@ -798,12 +800,13 @@ gem_disable_tx(struct gem_softc *sc)
}
static int
-gem_meminit(sc)
- struct gem_softc *sc;
+gem_meminit(struct gem_softc *sc)
{
struct gem_rxsoft *rxs;
int error, i;
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
/*
* Initialize the transmit descriptor ring.
*/
@@ -837,7 +840,8 @@ gem_meminit(sc)
GEM_INIT_RXDESC(sc, i);
}
sc->sc_rxptr = 0;
- GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+ GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
}
@@ -938,6 +942,20 @@ gem_init_locked(struct gem_softc *sc)
#endif
/* step 8. Global Configuration & Interrupt Mask */
+
+ /*
+ * Set the internal arbitration to "infinite" bursts of the
+ * maximum length of 31 * 64 bytes so DMA transfers aren't
+ * split up in cache line size chunks. This greatly improves
+ * RX performance.
+ * Enable silicon bug workarounds for the Apple variants.
+ */
+ GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
+ GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
+ ((sc->sc_flags & GEM_PCI) != 0 ? GEM_CONFIG_BURST_INF :
+ GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ?
+ GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
+
GEM_BANK1_WRITE_4(sc, GEM_INTMASK,
~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
@@ -949,7 +967,8 @@ gem_init_locked(struct gem_softc *sc)
GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_MASK,
GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_MASK,
- GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
+ GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
+ GEM_MAC_TX_PEAK_EXP);
#ifdef GEM_DEBUG
GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_MASK,
~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
@@ -961,7 +980,8 @@ gem_init_locked(struct gem_softc *sc)
/* step 9. ETX Configuration: use mostly default values. */
/* Enable DMA. */
- v = gem_ringsize(GEM_NTXDESC /* XXX */);
+ v = gem_ringsize(GEM_NTXDESC);
+ /* Set TX FIFO threshold and enable DMA. */
v |= ((sc->sc_variant == GEM_SUN_ERI ? 0x100 : 0x4ff) << 10) &
GEM_TX_CONFIG_TXFIFO_TH;
GEM_BANK1_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN);
@@ -973,14 +993,16 @@ gem_init_locked(struct gem_softc *sc)
/* RX TCP/UDP checksum offset */
v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
GEM_RX_CONFIG_CXM_START_SHFT);
-
- /* Enable DMA. */
+ /* Set RX FIFO threshold, set first byte offset and enable DMA. */
GEM_BANK1_WRITE_4(sc, GEM_RX_CONFIG,
v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
- (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN);
+ (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) |
+ GEM_RX_CONFIG_RXDMA_EN);
+ /* Adjust for the SBus clock probably isn't worth the fuzz. */
GEM_BANK1_WRITE_4(sc, GEM_RX_BLANKING,
- (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
+ ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) <<
+ GEM_RX_BLANKING_TIME_SHIFT) | 6);
/*
* The following value is for an OFF Threshold of about 3/4 full
@@ -1002,7 +1024,7 @@ gem_init_locked(struct gem_softc *sc)
device_printf(sc->sc_dev, "cannot configure RX MAC\n");
GEM_BANK1_WRITE_4(sc, GEM_MAC_RX_CONFIG, v);
- /* step 13. TX_MAC Configuration Register */
+ /* step 13. TX_MAC Configuration Register */
v = GEM_BANK1_READ_4(sc, GEM_MAC_TX_CONFIG);
v |= GEM_MAC_TX_ENABLE;
GEM_BANK1_WRITE_4(sc, GEM_MAC_TX_CONFIG, 0);
@@ -1037,6 +1059,8 @@ gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
uint64_t cflags, flags;
int error, nexttx, nsegs, offset, seg;
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
/* Get a work queue entry. */
if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
/* Ran out of descriptors. */
@@ -1143,7 +1167,6 @@ gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
#endif
if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
sc->sc_txwin = 0;
- flags |= GEM_TD_INTERRUPT_ME;
sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
GEM_TD_START_OF_PACKET);
@@ -1175,6 +1198,8 @@ gem_init_regs(struct gem_softc *sc)
{
const u_char *laddr = IF_LLADDR(sc->sc_ifp);
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
/* These registers are not cleared on reset. */
if ((sc->sc_flags & GEM_INITED) == 0) {
/* magic values */
@@ -1182,16 +1207,19 @@ gem_init_regs(struct gem_softc *sc)
GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG1, 8);
GEM_BANK1_WRITE_4(sc, GEM_MAC_IPG2, 4);
+ /* min frame length */
GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
- /* max frame and max burst size */
+ /* max frame length and max burst size */
GEM_BANK1_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME,
(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
+ /* more magic values */
GEM_BANK1_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7);
GEM_BANK1_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4);
GEM_BANK1_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10);
- /* dunno... */
GEM_BANK1_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8088);
+
+ /* random number seed */
GEM_BANK1_WRITE_4(sc, GEM_MAC_RANDOM_SEED,
((laddr[5] << 8) | laddr[4]) & 0x3ff);
@@ -1209,7 +1237,6 @@ gem_init_regs(struct gem_softc *sc)
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0);
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0);
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0);
-
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0);
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0);
@@ -1232,18 +1259,6 @@ gem_init_regs(struct gem_softc *sc)
/* Set XOFF PAUSE time. */
GEM_BANK1_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
- /*
- * Set the internal arbitration to "infinite" bursts of the
- * maximum length of 31 * 64 bytes so DMA transfers aren't
- * split up in cache line size chunks. This greatly improves
- * especially RX performance.
- * Enable silicon bug workarounds for the Apple variants.
- */
- GEM_BANK1_WRITE_4(sc, GEM_CONFIG,
- GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
- GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
- GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
-
/* Set the station address. */
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
GEM_BANK1_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
@@ -1263,12 +1278,32 @@ gem_start(struct ifnet *ifp)
GEM_UNLOCK(sc);
}
+static inline void
+gem_txkick(struct gem_softc *sc)
+{
+
+ /*
+ * Update the TX kick register. This register has to point to the
+ * descriptor after the last valid one and for optimum performance
+ * should be incremented in multiples of 4 (the DMA engine fetches/
+ * updates descriptors in batches of 4).
+ */
+#ifdef GEM_DEBUG
+ CTR3(KTR_GEM, "%s: %s: kicking TX %d",
+ device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
+#endif
+ GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
+}
+
static void
gem_start_locked(struct ifnet *ifp)
{
struct gem_softc *sc = ifp->if_softc;
struct mbuf *m;
- int ntx;
+ int kicked, ntx;
+
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
@@ -1280,6 +1315,7 @@ gem_start_locked(struct ifnet *ifp)
sc->sc_txnext);
#endif
ntx = 0;
+ kicked = 0;
for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
@@ -1291,19 +1327,18 @@ gem_start_locked(struct ifnet *ifp)
IFQ_DRV_PREPEND(&ifp->if_snd, m);
break;
}
+ if ((sc->sc_txnext % 4) == 0) {
+ gem_txkick(sc);
+ kicked = 1;
+ } else
+ kicked = 0;
ntx++;
- /* Kick the transmitter. */
-#ifdef GEM_DEBUG
- CTR3(KTR_GEM, "%s: %s: kicking TX %d",
- device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
-#endif
- GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- GEM_BANK1_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext);
-
BPF_MTAP(ifp, m);
}
if (ntx > 0) {
+ if (kicked == 0)
+ gem_txkick(sc);
#ifdef GEM_DEBUG
CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
device_get_name(sc->sc_dev), sc->sc_txnext);
@@ -1324,10 +1359,13 @@ gem_tint(struct gem_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
struct gem_txsoft *txs;
- int txlast, progress;
+ int progress;
+ uint32_t txlast;
#ifdef GEM_DEBUG
int i;
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
@@ -1338,7 +1376,6 @@ gem_tint(struct gem_softc *sc)
progress = 0;
GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
-
#ifdef GEM_DEBUG
if ((ifp->if_flags & IFF_DEBUG) != 0) {
printf(" txsoft %p transmit chain:\n", txs);
@@ -1419,8 +1456,8 @@ gem_tint(struct gem_softc *sc)
* and restart.
*/
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
-
+ if (STAILQ_EMPTY(&sc->sc_txdirtyq))
+ sc->sc_wdog_timer = 0;
gem_start_locked(ifp);
}
@@ -1437,6 +1474,7 @@ gem_rint_timeout(void *arg)
struct gem_softc *sc = arg;
GEM_LOCK_ASSERT(sc, MA_OWNED);
+
gem_rint(sc);
}
#endif
@@ -1449,6 +1487,8 @@ gem_rint(struct gem_softc *sc)
uint64_t rxstat;
uint32_t rxcomp;
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
#ifdef GEM_RINT_TIMEOUT
callout_stop(&sc->sc_rx_ch);
#endif
@@ -1461,12 +1501,11 @@ gem_rint(struct gem_softc *sc)
* how long the following loop can execute.
*/
rxcomp = GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION);
-
#ifdef GEM_DEBUG
- CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
+ CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d",
__func__, sc->sc_rxptr, rxcomp);
#endif
- GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
+ GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (; sc->sc_rxptr != rxcomp;) {
m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
rxstat = GEM_DMA_READ(sc,
@@ -1525,9 +1564,9 @@ gem_rint(struct gem_softc *sc)
/*
* Update the RX kick register. This register has to point
* to the descriptor after the last valid one (before the
- * current batch) and must be incremented in multiples of
- * 4 (because the DMA engine fetches/updates descriptors
- * in batches of 4).
+ * current batch) and for optimum performance should be
+ * incremented in multiples of 4 (the DMA engine fetches/
+ * updates descriptors in batches of 4).
*/
sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
if ((sc->sc_rxptr % 4) == 0) {
@@ -1545,7 +1584,7 @@ gem_rint(struct gem_softc *sc)
}
ifp->if_ipackets++;
- m->m_data += 2; /* We're already off by two */
+ m->m_data += ETHER_ALIGN; /* first byte offset */
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
@@ -1559,7 +1598,7 @@ gem_rint(struct gem_softc *sc)
}
#ifdef GEM_DEBUG
- CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
+ CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__,
sc->sc_rxptr, GEM_BANK1_READ_4(sc, GEM_RX_COMPLETION));
#endif
}
@@ -1572,6 +1611,8 @@ gem_add_rxbuf(struct gem_softc *sc, int idx)
bus_dma_segment_t segs[1];
int error, nsegs;
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
@@ -1620,7 +1661,15 @@ gem_eint(struct gem_softc *sc, u_int status)
return;
}
- device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
+ device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
+ if ((status & GEM_INTR_BERR) != 0) {
+ if ((sc->sc_flags & GEM_PCI) != 0)
+ printf(", PCI bus error 0x%x\n",
+ GEM_BANK1_READ_4(sc, GEM_PCI_ERROR_STATUS));
+ else
+ printf(", SBus error 0x%x\n",
+ GEM_BANK1_READ_4(sc, GEM_SBUS_STATUS));
+ }
}
void
@@ -1634,8 +1683,8 @@ gem_intr(void *v)
#ifdef GEM_DEBUG
CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
- device_get_name(sc->sc_dev), __func__, (status >> 19),
- (u_int)status);
+ device_get_name(sc->sc_dev), __func__,
+ (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status);
/*
* PCS interrupts must be cleared, otherwise no traffic is passed!
@@ -1665,7 +1714,7 @@ gem_intr(void *v)
device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif
- if ((status &
+ if (__predict_false(status &
(GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
gem_eint(sc, status);
@@ -1675,17 +1724,20 @@ gem_intr(void *v)
if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
gem_tint(sc);
- if (status & GEM_INTR_TX_MAC) {
+ if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) {
status2 = GEM_BANK1_READ_4(sc, GEM_MAC_TX_STATUS);
if ((status2 &
- ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP)) != 0)
+ ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP |
+ GEM_MAC_TX_PEAK_EXP)) != 0)
device_printf(sc->sc_dev,
"MAC TX fault, status %x\n", status2);
if ((status2 &
- (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0)
+ (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) {
+ sc->sc_ifp->if_oerrors++;
gem_init_locked(sc);
+ }
}
- if (status & GEM_INTR_RX_MAC) {
+ if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) {
status2 = GEM_BANK1_READ_4(sc, GEM_MAC_RX_STATUS);
/*
* At least with GEM_SUN_GEM and some GEM_SUN_ERI
@@ -1906,6 +1958,8 @@ gem_mii_statchg(device_t dev)
sc = device_get_softc(dev);
+ GEM_LOCK_ASSERT(sc, MA_OWNED);
+
#ifdef GEM_DEBUG
if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
@@ -1985,7 +2039,7 @@ gem_mii_statchg(device_t dev)
if ((GEM_BANK1_READ_4(sc, GEM_MIF_CONFIG) &
GEM_MIF_CONFIG_PHY_SEL) != 0) {
/* External MII needs echo disable if half duplex. */
- if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
+ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
IFM_FDX) == 0)
v |= GEM_MAC_XIF_ECHO_DISABL;
} else
@@ -2053,6 +2107,11 @@ gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
switch (cmd) {
case SIOCSIFFLAGS:
GEM_LOCK(sc);
+ if ((sc->sc_flags & GEM_DYING) != 0) {
+ error = EINVAL;
+ GEM_UNLOCK(sc);
+ break;
+ }
if ((ifp->if_flags & IFF_UP) != 0) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
((ifp->if_flags ^ sc->sc_ifflags) &