aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/ath/if_ath.c
diff options
context:
space:
mode:
authorAdrian Chadd <adrian@FreeBSD.org>2013-05-07 07:52:18 +0000
committerAdrian Chadd <adrian@FreeBSD.org>2013-05-07 07:52:18 +0000
commit7dcb2bea011ff37335be9ec7fd0263e715061c98 (patch)
tree8eb08c7634ebf6534447d8a013dc2fad3cbf5839 /sys/dev/ath/if_ath.c
parent55cf0326a1fc0c3a4803e429d595ac66720c4095 (diff)
downloadsrc-7dcb2bea011ff37335be9ec7fd0263e715061c98.tar.gz
src-7dcb2bea011ff37335be9ec7fd0263e715061c98.zip
Re-work how transmit buffer limits are enforced - partly to fix the PR,
but partly to just tidy up things. The problem here - there are too many TX buffers in the queue! By the time one needs to transmit an EAPOL frame (for this PR, it's the response to the group rekey notification from the AP) there are no ath_buf entries free and the EAPOL frame doesn't go out. Now, the problem! * Enforcing the TX buffer limitation _before_ we dequeue the frame? Bad idea. Because.. * .. it means I can't check whether the mbuf has M_EAPOL set. The solution(s): * De-queue the frame first * Don't bother doing the TX buffer minimum free check until after we know whether it's an EAPOL frame or not. * If it's an EAPOL frame, allocate the buffer from the mgmt pool rather than the default pool. Whilst I'm here: * Add a tweak to limit how many buffers a single node can acquire. * Don't enforce that for EAPOL frames. * .. set that to default to 1/4 of the available buffers, or 64, whichever is larger. This doesn't fix issues due to a sleeping node or a very poor performing node; but this doesn't make it worse. Tested: * AR5416 STA, TX'ing 100+ mbit UDP to an AP, but only 50mbit being received (thus the TX queue fills up.) * .. with CCMP / WPA2 encryption configured * .. and the group rekey time set to 10 seconds, just to elicit the behaviour very quickly. PR: kern/138379
Notes
Notes: svn path=/head/; revision=250326
Diffstat (limited to 'sys/dev/ath/if_ath.c')
-rw-r--r--sys/dev/ath/if_ath.c102
1 files changed, 87 insertions, 15 deletions
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index ec8681e289d1..f0b5328c9820 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -694,6 +694,13 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
*/
sc->sc_txq_mcastq_maxdepth = ath_txbuf;
+ /*
+ * Default the maximum queue depth for a given node
+ * to 1/4'th the TX buffers, or 64, whichever
+ * is larger.
+ */
+ sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);
+
/* Enable CABQ by default */
sc->sc_cabq_enable = 1;
@@ -2661,33 +2668,98 @@ ath_start(struct ifnet *ifp)
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
for (;;) {
- ATH_TXBUF_LOCK(sc);
- if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
- /* XXX increment counter? */
- ATH_TXBUF_UNLOCK(sc);
+ /*
+ * Grab the frame that we're going to try and transmit.
+ */
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+
+ /*
+ * Enforce how deep a node queue can get.
+ *
+ * XXX it would be nicer if we kept an mbuf queue per
+ * node and only whacked them into ath_bufs when we
+ * are ready to schedule some traffic from them.
+ * .. that may come later.
+ *
+ * XXX we should also track the per-node hardware queue
+ * depth so it is easy to limit the _SUM_ of the swq and
+ * hwq frames. Since we only schedule two HWQ frames
+ * at a time, this should be OK for now.
+ */
+ if ((!(m->m_flags & M_EAPOL)) &&
+ (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
+ sc->sc_stats.ast_tx_nodeq_overflow++;
+ if (ni != NULL)
+ ieee80211_free_node(ni);
+ m_freem(m);
+ m = NULL;
+ continue;
+ }
+
+ /*
+ * Check how many TX buffers are available.
+ *
+ * If this is for non-EAPOL traffic, just leave some
+ * space free in order for buffer cloning and raw
+ * frame transmission to occur.
+ *
+ * If it's for EAPOL traffic, ignore this for now.
+ * Management traffic will be sent via the raw transmit
+ * method which bypasses this check.
+ *
+ * This is needed to ensure that EAPOL frames during
+ * (re) keying have a chance to go out.
+ *
+ * See kern/138379 for more information.
+ */
+ if ((!(m->m_flags & M_EAPOL)) &&
+ (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
+ sc->sc_stats.ast_tx_nobuf++;
IF_LOCK(&ifp->if_snd);
+ _IF_PREPEND(&ifp->if_snd, m);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
IF_UNLOCK(&ifp->if_snd);
+ m = NULL;
break;
}
- ATH_TXBUF_UNLOCK(sc);
-
+
/*
* Grab a TX buffer and associated resources.
+ *
+ * If it's an EAPOL frame, allocate a MGMT ath_buf.
+ * That way, even temporary buffer exhaustion caused by
+ * the data path doesn't leave us without the ability
+ * to transmit management frames.
+ *
+ * Otherwise allocate a normal buffer.
*/
- bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
- if (bf == NULL)
- break;
+ if (m->m_flags & M_EAPOL)
+ bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
+ else
+ bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
- IFQ_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL) {
- ATH_TXBUF_LOCK(sc);
- ath_returnbuf_head(sc, bf);
- ATH_TXBUF_UNLOCK(sc);
+ if (bf == NULL) {
+ /*
+ * If we failed to allocate a buffer, prepend it
+ * and continue.
+ *
+ * We shouldn't fail normally, due to the check
+ * above.
+ */
+ sc->sc_stats.ast_tx_nobuf++;
+ IF_LOCK(&ifp->if_snd);
+ _IF_PREPEND(&ifp->if_snd, m);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IF_UNLOCK(&ifp->if_snd);
+ m = NULL;
break;
}
- ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+
npkts ++;
+
/*
* Check for fragmentation. If this frame
* has been broken up verify we have enough