author     adrian <adrian@FreeBSD.org>  2012-06-13 06:57:55 +0000
committer  adrian <adrian@FreeBSD.org>  2012-06-13 06:57:55 +0000
commit     528dfae9f303da46dcc4ebee63d6b187663d51a9 (patch)
tree       708859cf2ac963dede56373155b3cc6be23016e5 /sys/dev/ath/if_ath.c
parent     b15cbc2807377cb0ea0721766e07f5f168a2c25c (diff)
Implement a separate, smaller pool of ath_buf entries for use by management
traffic.

* Create sc_mgmt_txbuf and sc_mgmt_txdesc, initialise/free them
  appropriately.
* Create an enum to represent buffer types in the API.
* Extend ath_getbuf() and _ath_getbuf_locked() to take the above enum.
* Right now anything sent via ic_raw_xmit() allocates via ATH_BUFTYPE_MGMT.
  This may not be very useful.
* Add ATH_BUF_MGMT flag (ath_buf.bf_flags) which indicates the current
  buffer is a mgmt buffer and should go back onto the mgmt free list.
* Extend 'txagg' to include debugging output for both normal and mgmt
  txbufs.
* When checking/clearing ATH_BUF_BUSY, do it on both TX pools.

Tested:

* STA mode, with heavy UDP injection via iperf. This filled the TX queue
  however BARs were still going out successfully.

TODO:

* Initialise the mgmt buffers with ATH_BUF_MGMT and then ensure the right
  type is being allocated and freed on the appropriate list. That'd save
  a write operation (to bf->bf_flags) on each buffer alloc/free.
* Test on AP mode, ensure that BAR TX and probe responses go out nicely
  when the main TX queue is filled (eg with paused traffic to a TID,
  awaiting a BAR to complete.)

PR: kern/168170
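For readers skimming the diff below: the buffer-type enum and the
ATH_BUF_MGMT flag referenced above are defined outside this file (the
diffstat is limited to if_ath.c), so they do not appear in the hunks.
A minimal sketch of what those API additions look like, assuming the
names from the commit message and an illustrative flag value:

/*
 * Sketch only -- the real definitions live in the driver's header
 * (if_athvar.h), which is not part of this diff; the flag value
 * below is illustrative, not authoritative.
 */
typedef enum {
	ATH_BUFTYPE_NORMAL	= 0,	/* allocate from the main TX pool */
	ATH_BUFTYPE_MGMT	= 1,	/* allocate from the mgmt TX pool */
} ath_buf_type_t;

#define	ATH_BUF_MGMT	0x00000004	/* buffer lives on the mgmt free list */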
Diffstat (limited to 'sys/dev/ath/if_ath.c')
-rw-r--r--  sys/dev/ath/if_ath.c | 82
1 file changed, 69 insertions(+), 13 deletions(-)
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index b925668..0044949 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -246,6 +246,10 @@ static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
+static int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
+SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
+ 0, "tx (mgmt) buffers allocated");
+TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
int ath_bstuck_threshold = 4; /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
@@ -2212,13 +2216,17 @@ ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
}
struct ath_buf *
-_ath_getbuf_locked(struct ath_softc *sc)
+_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
{
struct ath_buf *bf;
ATH_TXBUF_LOCK_ASSERT(sc);
- bf = TAILQ_FIRST(&sc->sc_txbuf);
+ if (btype == ATH_BUFTYPE_MGMT)
+ bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
+ else
+ bf = TAILQ_FIRST(&sc->sc_txbuf);
+
if (bf == NULL) {
sc->sc_stats.ast_tx_getnobuf++;
} else {
@@ -2228,18 +2236,29 @@ _ath_getbuf_locked(struct ath_softc *sc)
}
}
- if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
- TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
- else
+ if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
+ if (btype == ATH_BUFTYPE_MGMT)
+ TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
+ else
+ TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
+ } else
bf = NULL;
if (bf == NULL) {
+ /* XXX should check which list, mgmt or otherwise */
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
"out of xmit buffers" : "xmit buffer busy");
return NULL;
}
+ /* XXX TODO: should do this at buffer list initialisation */
+ /* XXX (then, ensure the buffer has the right flag set) */
+ if (btype == ATH_BUFTYPE_MGMT)
+ bf->bf_flags |= ATH_BUF_MGMT;
+ else
+ bf->bf_flags &= (~ATH_BUF_MGMT);
+
/* Valid bf here; clear some basic fields */
bf->bf_next = NULL; /* XXX just to be sure */
bf->bf_last = NULL; /* XXX again, just to be sure */
@@ -2274,7 +2293,9 @@ ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
{
struct ath_buf *tbf;
- tbf = ath_getbuf(sc);
+ tbf = ath_getbuf(sc,
+ (bf->bf_flags & ATH_BUF_MGMT) ?
+ ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
if (tbf == NULL)
return NULL; /* XXX failure? Why? */
@@ -2302,12 +2323,18 @@ ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
}
struct ath_buf *
-ath_getbuf(struct ath_softc *sc)
+ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
{
struct ath_buf *bf;
ATH_TXBUF_LOCK(sc);
- bf = _ath_getbuf_locked(sc);
+ bf = _ath_getbuf_locked(sc, btype);
+ /*
+ * If a mgmt buffer was requested but we're out of those,
+ * try requesting a normal one.
+ */
+ if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
+ bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
ATH_TXBUF_UNLOCK(sc);
if (bf == NULL) {
struct ifnet *ifp = sc->sc_ifp;
@@ -2351,7 +2378,7 @@ ath_start(struct ifnet *ifp)
/*
* Grab a TX buffer and associated resources.
*/
- bf = ath_getbuf(sc);
+ bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
if (bf == NULL)
break;
@@ -2857,11 +2884,26 @@ ath_desc_alloc(struct ath_softc *sc)
return error;
}
+ error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
+ "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC);
+ if (error != 0) {
+ ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+ ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+ return error;
+ }
+
+ /*
+ * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
+ * flag doesn't have to be set in ath_getbuf_locked().
+ */
+
error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
"beacon", ATH_BCBUF, 1);
if (error != 0) {
- ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+ ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+ ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
+ &sc->sc_txbuf_mgmt);
return error;
}
return 0;
@@ -2877,6 +2919,9 @@ ath_desc_free(struct ath_softc *sc)
ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
if (sc->sc_rxdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+ if (sc->sc_txdma_mgmt.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
+ &sc->sc_txbuf_mgmt);
}
static struct ieee80211_node *
@@ -3323,12 +3368,14 @@ ath_tx_update_busy(struct ath_softc *sc)
* descriptor.
*/
ATH_TXBUF_LOCK_ASSERT(sc);
+ last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
+ if (last != NULL)
+ last->bf_flags &= ~ATH_BUF_BUSY;
last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
if (last != NULL)
last->bf_flags &= ~ATH_BUF_BUSY;
}
-
/*
* Process completed xmit descriptors from the specified queue.
* Kick the packet scheduler if needed. This can occur from this
@@ -3637,7 +3684,10 @@ ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
ATH_TXBUF_LOCK_ASSERT(sc);
- TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+ if (bf->bf_flags & ATH_BUF_MGMT)
+ TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
+ else
+ TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
}
void
@@ -3646,7 +3696,10 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
ATH_TXBUF_LOCK_ASSERT(sc);
- TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+ if (bf->bf_flags & ATH_BUF_MGMT)
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
+ else
+ TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
}
/*
@@ -3727,6 +3780,9 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
if (bf != NULL)
bf->bf_flags &= ~ATH_BUF_BUSY;
+ bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
+ if (bf != NULL)
+ bf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_UNLOCK(sc);
for (ix = 0;; ix++) {
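To round out the picture, here is a hedged caller sketch showing how the
new two-pool allocation is meant to be used. The function
ath_tx_mgmt_example() is hypothetical and not part of this commit, but
ath_getbuf(), ath_buf_type_t and the fallback behaviour are taken from
the hunks above.

/*
 * Hypothetical caller (sketch): allocate a buffer for a management
 * frame.  ath_getbuf() itself falls back to the normal pool when the
 * mgmt pool is empty, so the caller only sees NULL when both pools
 * are exhausted.
 */
static int
ath_tx_mgmt_example(struct ath_softc *sc)
{
	struct ath_buf *bf;

	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
	if (bf == NULL)
		return (ENOBUFS);	/* both pools empty */

	/* ... fill in descriptors and hand bf to the TX queue ... */

	/*
	 * On completion, ath_returnbuf_tail()/ath_returnbuf_head()
	 * test ATH_BUF_MGMT and return bf to the matching free list.
	 */
	return (0);
}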