author     adrian <adrian@FreeBSD.org>   2012-03-10 04:14:04 +0000
committer  adrian <adrian@FreeBSD.org>   2012-03-10 04:14:04 +0000
commit     baaae4c089684ffdf08087371fb69edb26a97088 (patch)
tree       f1eeac29546cc6cdaa2731a048c92917139d9d31 /sys/dev
parent     147645f640fdc88eafdb8d8942b31c147bba72e9 (diff)
Don't flood the cabq/mcastq with frames.
In a very noisy 2.4GHz environment (with HT/40 enabled, making it worse) I saw the following occur:

* the air was considered "busy" a lot of the time;
* the cabq time is quite short due to staggered beacons being enabled;
* it just wasn't able to keep up TX'ing CABQ frames;
* .. and the cabq would swallow up all the TX ath_buf's.

This patch introduces a twiddle which allows the maximum cabq depth to be set, forcing further frames to be dropped. It defaults to the TX buffer count at the moment, so the default behaviour isn't changed.

I've also started fleshing out a similar setup for the data path, so it doesn't swallow up all the available TX buffers and prevent management frames (such as ADDBA) from getting out.

PR: kern/165895
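For reference, the core of the change boils down to the check below, a minimal standalone sketch using the field and macro names from the diff that follows; the helper name ath_mcastq_overflow() is illustrative only and is not a function in the driver.

/*
 * Sketch: refuse another multicast/broadcast frame once the combined
 * cabq + per-VAP mcastq depth exceeds sc_txq_mcastq_maxdepth.
 * Returns ENOBUFS on overflow; the caller frees the mbuf.
 */
static int
ath_mcastq_overflow(struct ath_softc *sc, struct ath_vap *avp)
{
        int overflow;

        ATH_TXQ_LOCK(sc->sc_cabq);
        ATH_TXQ_LOCK(&avp->av_mcastq);
        overflow = (sc->sc_cabq->axq_depth + avp->av_mcastq.axq_depth) >
            sc->sc_txq_mcastq_maxdepth;
        ATH_TXQ_UNLOCK(&avp->av_mcastq);
        ATH_TXQ_UNLOCK(sc->sc_cabq);

        if (overflow) {
                sc->sc_stats.ast_tx_mcastq_overflow++;
                return (ENOBUFS);
        }
        return (0);
}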
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/ath/if_ath.c         13
-rw-r--r--  sys/dev/ath/if_ath_sysctl.c  16
-rw-r--r--  sys/dev/ath/if_ath_tx.c      56
-rw-r--r--  sys/dev/ath/if_athioctl.h     3
-rw-r--r--  sys/dev/ath/if_athvar.h      25
5 files changed, 109 insertions(+), 4 deletions(-)
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index bff28ee..bc47f87 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -637,6 +637,19 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
#endif
/*
+ * TODO: enforce that at least this many frames are available
+ * in the txbuf list before allowing data frames (raw or
+ * otherwise) to be transmitted.
+ */
+ sc->sc_txq_data_minfree = 10;
+ /*
+ * Leave this as default to maintain legacy behaviour.
+ * Shortening the cabq/mcastq may end up causing some
+ * undesirable behaviour.
+ */
+ sc->sc_txq_mcastq_maxdepth = ath_txbuf;
+
+ /*
* Allow the TX and RX chainmasks to be overridden by
* environment variables and/or device.hints.
*
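The data-path limit itself is not enforced yet in this commit; sc_txq_data_minfree is only initialised above. Purely as a hypothetical sketch of what the TODO could look like, the check might resemble the following; ath_txbuf_free_count() is an assumed helper for however many ath_buf entries remain on the free list, not something this patch (or necessarily the driver) provides.

/*
 * Hypothetical sketch of the TODO above: reject a data frame when
 * fewer than sc_txq_data_minfree TX buffers remain free, so that
 * management frames such as ADDBA can still get a buffer.
 * ath_txbuf_free_count() is assumed for illustration only.
 */
static int
ath_tx_data_minfree_check(struct ath_softc *sc)
{
        int avail;

        ATH_TXBUF_LOCK(sc);
        avail = ath_txbuf_free_count(sc);       /* assumed helper */
        ATH_TXBUF_UNLOCK(sc);

        if (avail < sc->sc_txq_data_minfree)
                return (ENOBUFS);       /* caller should drop the frame */
        return (0);
}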
diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c
index 7343919..410b4c6 100644
--- a/sys/dev/ath/if_ath_sysctl.c
+++ b/sys/dev/ath/if_ath_sysctl.c
@@ -605,6 +605,17 @@ ath_sysctlattach(struct ath_softc *sc)
"tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0,
"");
+#if 0
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "txq_data_minfree", CTLFLAG_RW, &sc->sc_txq_data_minfree,
+ 0, "Minimum free buffers before adding a data frame"
+ " to the TX queue");
+#endif
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "txq_mcastq_maxdepth", CTLFLAG_RW,
+ &sc->sc_txq_mcastq_maxdepth, 0,
+ "Maximum buffer depth for multicast/broadcast frames");
+
#ifdef IEEE80211_SUPPORT_TDMA
if (ath_hal_macversion(ah) > 0x78) {
sc->sc_tdmadbaprep = 2;
@@ -885,7 +896,10 @@ ath_sysctl_stats_attach(struct ath_softc *sc)
&sc->sc_stats.ast_rx_intr, 0, "RX interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_intr", CTLFLAG_RD,
&sc->sc_stats.ast_tx_intr, 0, "TX interrupts");
-
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_mcastq_overflow",
+ CTLFLAG_RD, &sc->sc_stats.ast_tx_mcastq_overflow, 0,
+ "Number of multicast frames exceeding maximum mcast queue depth");
+
/* Attach the RX phy error array */
ath_sysctl_stats_attach_rxphyerr(sc, child);
}
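Since ath_sysctlattach() hangs these nodes off the device's sysctl tree, the new knob should surface as something like dev.ath.0.txq_mcastq_maxdepth (the exact path depends on the unit number) and be adjustable at runtime with sysctl(8), while ast_tx_mcastq_overflow appears alongside the other read-only ast_* statistics. These node paths are inferred from the attach code above, not quoted from documentation.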
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 9314034..53c9320 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -1369,7 +1369,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_vap *avp = ATH_VAP(vap);
- int r;
+ int r = 0;
u_int pri;
int tid;
struct ath_txq *txq;
@@ -1402,6 +1402,30 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ /*
+ * Enforce how deep the multicast queue can grow.
+ *
+ * XXX duplicated in ath_raw_xmit().
+ */
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ ATH_TXQ_LOCK(sc->sc_cabq);
+ ATH_TXQ_LOCK(&avp->av_mcastq);
+
+ if ((sc->sc_cabq->axq_depth + avp->av_mcastq.axq_depth) >
+ sc->sc_txq_mcastq_maxdepth) {
+ sc->sc_stats.ast_tx_mcastq_overflow++;
+ r = ENOBUFS;
+ }
+
+ ATH_TXQ_UNLOCK(&avp->av_mcastq);
+ ATH_TXQ_UNLOCK(sc->sc_cabq);
+
+ if (r != 0) {
+ m_freem(m0);
+ return r;
+ }
+ }
+
/* A-MPDU TX */
is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
@@ -1734,7 +1758,10 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ifp->if_softc;
struct ath_buf *bf;
- int error;
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ath_vap *avp = ATH_VAP(vap);
+ int error = 0;
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
@@ -1755,6 +1782,31 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
error = ENETDOWN;
goto bad;
}
+
+ /*
+ * Enforce how deep the multicast queue can grow.
+ *
+ * XXX duplicated in ath_tx_start().
+ */
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ ATH_TXQ_LOCK(sc->sc_cabq);
+ ATH_TXQ_LOCK(&avp->av_mcastq);
+
+ if ((sc->sc_cabq->axq_depth + avp->av_mcastq.axq_depth) >
+ sc->sc_txq_mcastq_maxdepth) {
+ sc->sc_stats.ast_tx_mcastq_overflow++;
+ error = ENOBUFS;
+ }
+
+ ATH_TXQ_UNLOCK(&avp->av_mcastq);
+ ATH_TXQ_UNLOCK(sc->sc_cabq);
+
+ if (error != 0) {
+ m_freem(m);
+ goto bad;
+ }
+ }
+
/*
* Grab a TX buffer and associated resources.
*/
diff --git a/sys/dev/ath/if_athioctl.h b/sys/dev/ath/if_athioctl.h
index 9c1cc12..2e19ee4 100644
--- a/sys/dev/ath/if_athioctl.h
+++ b/sys/dev/ath/if_athioctl.h
@@ -155,7 +155,8 @@ struct ath_stats {
u_int32_t ast_rx_intr;
u_int32_t ast_tx_aggr_ok; /* aggregate TX ok */
u_int32_t ast_tx_aggr_fail; /* aggregate TX failed */
- u_int32_t ast_pad[2];
+ u_int32_t ast_tx_mcastq_overflow; /* multicast queue overflow */
+ u_int32_t ast_pad[1];
};
#define SIOCGATHSTATS _IOWR('i', 137, struct ifreq)
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 1b6542b..426c520 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -530,6 +530,31 @@ struct ath_softc {
int sc_txchainmask; /* currently configured TX chainmask */
int sc_rxchainmask; /* currently configured RX chainmask */
+ /* Queue limits */
+
+ /*
+ * To avoid queue starvation in congested conditions,
+ * these parameters tune the maximum number of frames
+ * queued to the data/mcastq before they're dropped.
+ *
+ * This is to prevent:
+ * + a single destination overwhelming everything, including
+ * management/multicast frames;
+ * + multicast frames overwhelming everything (when the
+ * air is sufficiently busy that cabq can't drain.)
+ *
+ * These implement:
+ * + data_minfree is the minimum number of free buffers
+ * that must remain available for a data frame to be queued.
+ *
+ * + mcastq_maxdepth is the maximum allowed depth of the combined
+ * avp mcastq + cabq. The avp queue is included in the comparison
+ * just to be a little overly conservative; this may end up being
+ * unhelpful with multiple VAPs.
+ */
+ int sc_txq_data_minfree;
+ int sc_txq_mcastq_maxdepth;
+
/*
* Aggregation twiddles
*