author     adrian <adrian@FreeBSD.org>   2013-02-13 05:32:19 +0000
committer  adrian <adrian@FreeBSD.org>   2013-02-13 05:32:19 +0000
commit     af7893e0d41c28bee0d520d4e21900ace59ed963 (patch)
tree       42ad376dd393e5ee24ee4cd2775f4de71f6b5d53
parent     5d56fc6ccd907da03c2ffd167a384b205ca05f75 (diff)
Pull out the if_transmit() work and revert back to ath_start().
My change had some rather significant behavioural effects on throughput. The two issues I noticed:

* With if_start and the ifnet mbuf queue, any temporary latency would get absorbed by mbufs being queued. With ath_transmit() queuing things to ath_buf's, I'd only get 512 TX buffers before I couldn't queue any further frames.

* There's also some non-zero latency involved with TX being pushed into a taskqueue via direct dispatch: any time the scheduler didn't immediately schedule the ath TX task, extra latency was added.

Various 1GE/10GE drivers implement both direct dispatch (if the TX lock can be acquired) and deferred task transmission (if the TX lock can't be acquired), with frames being pushed into a drbr queue. I'll have to do this at some point, but until I figure out how to deal with 802.11 fragments, I'll have to wait a while longer.

So what I saw:

* lots of extra latency, especially under load - if the taskqueue wasn't immediately scheduled, things went pear-shaped;

* any extra latency would result in TX ath_buf's taking their sweet time being replenished, so any further calls to ath_transmit() would drop mbufs;

* .. and yes, there's no explicit backpressure here - things are just dropped. Eek.

With this reverted, general performance has gone back up, but those subtle if_start() related race conditions are back. For some reason this is doubly obvious with the AR5416 NIC and I don't quite understand why yet.

There's an unrelated issue with AR5416 performance in STA mode (it's fine in AP mode when bridging frames, weirdly..) that requires a little further investigation. Specifically, it works fine on a Lenovo T40 (single-core CPU) running a March 2012 9-STABLE kernel, but a Lenovo T60 (dual-core) running an early November 2012 kernel behaves very poorly. The same hardware with an AR9160 or AR9280 behaves perfectly.
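For reference, a minimal sketch of that hybrid dispatch scheme might look like the code below. It is not part of this commit; sc_txq_ring, sc_txtask and ath_tx_ring_drain() are placeholder names, and a real implementation would still have to deal with 802.11 fragment chains before staging frames this way.

/*
 * Hypothetical hybrid if_transmit(): stage the frame in a drbr ring,
 * then either drain the ring directly if the TX lock is uncontended,
 * or defer the drain to the TX taskqueue.
 *
 * Assumes the usual if_ath.c includes (sys/param.h, sys/mbuf.h,
 * net/if.h, net/if_var.h, dev/ath/if_athvar.h).  sc_txq_ring,
 * sc_txtask and ath_tx_ring_drain() are placeholders, not existing
 * driver members.
 */
static int
ath_transmit_sketch(struct ifnet *ifp, struct mbuf *m)
{
	struct ath_softc *sc = ifp->if_softc;
	int error;

	/* drbr_enqueue() frees the mbuf for us if the ring is full. */
	error = drbr_enqueue(ifp, sc->sc_txq_ring, m);
	if (error != 0)
		return (error);

	if (mtx_trylock(&sc->sc_tx_mtx)) {
		/* Uncontended: direct dispatch in the caller's context. */
		ath_tx_ring_drain(sc);
		mtx_unlock(&sc->sc_tx_mtx);
	} else {
		/* Contended: let the TX taskqueue pick the frames up. */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
	}
	return (0);
}

Note that on enqueue failure the mbuf is freed but the error is still returned, leaving any node reference handling to net80211, which matches the if_transmit() contract discussed in the removed ath_transmit() comment below. Either way the actual hardware queuing stays serialised behind the one TX lock; only who performs the drain changes.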
-rw-r--r--  sys/dev/ath/if_ath.c      | 541
-rw-r--r--  sys/dev/ath/if_ath_misc.h |   4
-rw-r--r--  sys/dev/ath/if_ath_tx.c   |   8
-rw-r--r--  sys/dev/ath/if_athvar.h   |   5
4 files changed, 107 insertions(+), 451 deletions(-)
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 7679fd2..b6b4217 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -152,6 +152,7 @@ static void ath_init(void *);
static void ath_stop_locked(struct ifnet *);
static void ath_stop(struct ifnet *);
static int ath_reset_vap(struct ieee80211vap *, u_long);
+static void ath_start_queue(struct ifnet *ifp);
static int ath_media_change(struct ifnet *);
static void ath_watchdog(void *);
static int ath_ioctl(struct ifnet *, u_long, caddr_t);
@@ -212,14 +213,6 @@ static void ath_dfs_tasklet(void *, int);
static void ath_node_powersave(struct ieee80211_node *, int);
static int ath_node_set_tim(struct ieee80211_node *, int);
-static int ath_transmit(struct ifnet *ifp, struct mbuf *m);
-static void ath_qflush(struct ifnet *ifp);
-
-static void ath_txq_qinit(struct ifnet *ifp);
-static void ath_txq_qflush(struct ifnet *ifp);
-static int ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0);
-static void ath_txq_qrun(struct ifnet *ifp);
-
#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif
@@ -436,21 +429,6 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
"%s taskq", ifp->if_xname);
- /*
- * This taskqueue doesn't get any higher priority
- * than the ath(4) taskqueue (PI_NET) so TX won't
- * pre-empt RX and other task priorities.
- *
- * This may not be optimal - the previous behaviour
- * was to direct-dispatch frames via the sending
- * task context, rather than (always) software
- * queuing.
- */
- sc->sc_tx_tq = taskqueue_create("ath_tx_taskq", M_NOWAIT,
- taskqueue_thread_enqueue, &sc->sc_tx_tq);
- taskqueue_start_threads(&sc->sc_tx_tq, 1, PI_NET,
- "%s TX taskq", ifp->if_xname);
-
TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
@@ -579,18 +557,13 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
ifp->if_softc = sc;
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
- /* XXX net80211 uses if_start to re-start ifnet processing */
- ifp->if_start = ath_start;
- ifp->if_transmit = ath_transmit;
- ifp->if_qflush = ath_qflush;
+ ifp->if_start = ath_start_queue;
ifp->if_ioctl = ath_ioctl;
ifp->if_init = ath_init;
IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
IFQ_SET_READY(&ifp->if_snd);
- ath_txq_qinit(ifp);
-
ic->ic_ifp = ifp;
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
@@ -996,7 +969,6 @@ ath_detach(struct ath_softc *sc)
ath_stop(ifp);
ieee80211_ifdetach(ifp->if_l2com);
taskqueue_free(sc->sc_tq);
- taskqueue_free(sc->sc_tx_tq);
#ifdef ATH_TX99_DIAG
if (sc->sc_tx99 != NULL)
sc->sc_tx99->detach(sc->sc_tx99);
@@ -1005,7 +977,6 @@ ath_detach(struct ath_softc *sc)
#ifdef ATH_DEBUG_ALQ
if_ath_alq_tidyup(&sc->sc_alq);
#endif
- ath_txq_qflush(ifp);
ath_spectral_detach(sc);
ath_dfs_detach(sc);
ath_desc_free(sc);
@@ -2145,7 +2116,6 @@ ath_txrx_start(struct ath_softc *sc)
{
taskqueue_unblock(sc->sc_tq);
- taskqueue_unblock(sc->sc_tx_tq);
}
/*
@@ -2246,7 +2216,6 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
/* Try to (stop any further TX/RX from occuring */
taskqueue_block(sc->sc_tq);
- taskqueue_block(sc->sc_tx_tq);
ATH_PCU_LOCK(sc);
ath_hal_intrset(ah, 0); /* disable interrupts */
@@ -2546,70 +2515,13 @@ ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
}
static void
-ath_qflush(struct ifnet *ifp)
-{
-
- /* XXX complete/suspend TX */
- ath_txq_qflush(ifp);
-
- /* Unsuspend TX? */
-}
-
-/*
- * Transmit a frame from net80211.
- */
-static int
-ath_transmit(struct ifnet *ifp, struct mbuf *m)
+ath_start_queue(struct ifnet *ifp)
{
- struct ieee80211_node *ni;
- struct ath_softc *sc = (struct ath_softc *) ifp->if_softc;
-
- ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
-
- if (ath_txq_qadd(ifp, m) < 0) {
- /*
- * If queuing fails, the if_transmit() API makes the
- * callee responsible for freeing the mbuf (rather than
- * the caller, who just assumes the mbuf has been dealt
- * with somehow).
- *
- * BUT, net80211 will free node references if if_transmit()
- * fails _on encapsulated buffers_. Since drivers
- * only get fully encapsulated frames from net80211 (via
- * raw or otherwise APIs), we must be absolutely careful
- * to not free the node ref or things will get loopy
- * down the track.
- *
- * For tx fragments, the TX code must free whatever
- * new references it created, but NOT the original
- * TX node ref that was passed in.
- */
- ath_freetx(m);
- return (ENOBUFS);
- }
+ struct ath_softc *sc = ifp->if_softc;
- /*
- * Unconditionally kick the taskqueue.
- *
- * Now, there's a subtle race condition possible here if we
- * went down the path of only kicking the taskqueue if it
- * wasn't running. If we're not absolutely, positively
- * careful, we could have a small race window between
- * finishing the taskqueue and clearing the TX flag, which
- * would be interpreted in _this_ context as "we don't need
- * to kick the TX taskqueue, as said taskqueue is already
- * running."
- *
- * It's a problem in some of the 1GE/10GE NIC drivers.
- * So until a _correct_ method for implementing this is
- * drafted up and written, which avoids (potentially)
- * large amounts of locking contention per-frame, let's
- * just do the inefficient "kick taskqueue each time"
- * method.
- */
+ ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: start");
ath_tx_kick(sc);
-
- return (0);
+ ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: finished");
}
void
@@ -2636,7 +2548,9 @@ ath_start_task(void *arg, int npending)
sc->sc_txstart_cnt++;
ATH_PCU_UNLOCK(sc);
- ath_txq_qrun(ifp);
+ ATH_TX_LOCK(sc);
+ ath_start(sc->sc_ifp);
+ ATH_TX_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
@@ -2644,292 +2558,91 @@ ath_start_task(void *arg, int npending)
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: finished");
}
-/*
- * Pending TX buffer chain management routines.
- */
-
-
-/*
- * Initialise the TX queue!
- */
-static void
-ath_txq_qinit(struct ifnet *ifp)
-{
- struct ath_softc *sc = ifp->if_softc;
-
- TAILQ_INIT(&sc->sc_txbuf_list);
-}
-
-/*
- * Add this mbuf to the TX buffer chain.
- *
- * This allocates an ath_buf, links the mbuf into it, and
- * appends it to the end of the TX buffer chain.
- * It doesn't fill out the ath_buf in any way besides
- * that.
- *
- * Since the mbuf may be a list of mbufs representing
- * 802.11 fragments, handle allocating ath_bufs for each
- * of the mbuf fragments.
- *
- * If we queued it, 0 is returned. Else, < 0 is returned.
- *
- * If <0 is returned, the sender is responsible for
- * freeing the mbuf if appropriate.
- */
-static int
-ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0)
+void
+ath_start(struct ifnet *ifp)
{
struct ath_softc *sc = ifp->if_softc;
- struct ath_buf *bf;
- ath_bufhead frags;
struct ieee80211_node *ni;
- struct mbuf *m;
-
- /* XXX recursive TX completion -> TX? */
- ATH_TX_IC_UNLOCK_ASSERT(sc);
-
- /*
- * We grab the node pointer, but we don't deref
- * the node. The caller must be responsible for
- * freeing the node reference if it decides to
- * free the mbuf.
- */
- ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif;
-
- ATH_TXBUF_LOCK(sc);
- if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
- /* XXX increment counter? */
- ATH_TXBUF_UNLOCK(sc);
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
- return (-1);
- }
- ATH_TXBUF_UNLOCK(sc);
-
- /*
- * Grab a TX buffer and associated resources.
- */
- bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
- if (bf == NULL) {
- device_printf(sc->sc_dev,
- "%s: couldn't allocate a buffer\n",
- __func__);
- return (-1);
- }
-
- /* Setup the initial buffer node contents */
- bf->bf_m = m0;
- bf->bf_node = ni;
-
- /*
- * Check for fragmentation. If this frame
- * has been broken up verify we have enough
- * buffers to send all the fragments so all
- * go out or none...
- */
- TAILQ_INIT(&frags);
- if (m0->m_flags & M_FRAG)
- DPRINTF(sc, ATH_DEBUG_XMIT, "%s: txfrag\n", __func__);
- if ((m0->m_flags & M_FRAG) &&
- !ath_txfrag_setup(sc, &frags, m0, ni)) {
- DPRINTF(sc, ATH_DEBUG_XMIT,
- "%s: out of txfrag buffers\n", __func__);
- sc->sc_stats.ast_tx_nofrag++;
- ifp->if_oerrors++;
- goto bad;
- }
-
- /*
- * Don't stuff the non-fragment frame onto the fragment
- * queue. ath_txfrag_cleanup() should only be called on fragments -
- * ie, the _extra_ ieee80211_node references - and not the single
- * node reference already done as part of the net08211 TX call
- * into the driver.
- */
-
- ATH_TX_IC_LOCK(sc);
-
- /*
- * Throw the single frame onto the queue.
- */
- TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list);
-
- /*
- * Update next packet duration length if it's a fragment.
- * It's needed for accurate NAV calculations (which for
- * fragments include the length of the NEXT fragment.)
- */
- if (m0->m_nextpkt != NULL)
- bf->bf_state.bfs_nextpktlen =
- m0->m_nextpkt->m_pkthdr.len;
-
- /*
- * Append the fragments. We have to populate bf and node
- * references here as although the txfrag setup code does
- * create buffers and increment the node ref, it doesn't
- * populate the fields for us.
- */
- m = m0->m_nextpkt;
- while ( (bf = TAILQ_FIRST(&frags)) != NULL) {
- bf->bf_m = m;
- bf->bf_node = ni;
- device_printf(sc->sc_dev, "%s: adding bf=%p, m=%p, ni=%p\n",
- __func__,
- bf,
- bf->bf_m,
- bf->bf_node);
- TAILQ_REMOVE(&frags, bf, bf_list);
- TAILQ_INSERT_TAIL(&sc->sc_txbuf_list, bf, bf_list);
-
- /*
- * For duration (NAV) calculations, we need
- * to know the next fragment size.
- *
- * XXX This isn't entirely accurate as it doesn't
- * take pad bytes and such into account, but it'll do
- * for fragment length / NAV calculations.
- */
- if (m->m_nextpkt != NULL)
- bf->bf_state.bfs_nextpktlen =
- m->m_nextpkt->m_pkthdr.len;
-
- m = m->m_nextpkt;
- }
- ATH_TX_IC_UNLOCK(sc);
-
- return (0);
-bad:
- device_printf(sc->sc_dev, "%s: bad?!\n", __func__);
- bf->bf_m = NULL;
- bf->bf_node = NULL;
- ATH_TXBUF_LOCK(sc);
- ath_returnbuf_head(sc, bf);
- ath_txfrag_cleanup(sc, &frags, ni);
- ATH_TXBUF_UNLOCK(sc);
- return (-1);
-}
-
-/*
- * Flush the pending TX buffer chain.
- */
-static void
-ath_txq_qflush(struct ifnet *ifp)
-{
- struct ath_softc *sc = ifp->if_softc;
- ath_bufhead txlist;
struct ath_buf *bf;
-
- device_printf(sc->sc_dev, "%s: called\n", __func__);
- TAILQ_INIT(&txlist);
-
- /* Grab lock */
- ATH_TX_IC_LOCK(sc);
-
- /* Copy everything out of sc_txbuf_list into txlist */
- TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
-
- /* Unlock */
- ATH_TX_IC_UNLOCK(sc);
-
- /* Now, walk the list, freeing things */
- while ((bf = TAILQ_FIRST(&txlist)) != NULL) {
- TAILQ_REMOVE(&txlist, bf, bf_list);
-
- if (bf->bf_node)
- ieee80211_free_node(bf->bf_node);
-
- m_free(bf->bf_m);
-
- /* XXX paranoia! */
- bf->bf_m = NULL;
- bf->bf_node = NULL;
-
- /*
- * XXX Perhaps do a second pass with the TXBUF lock
- * held and free them all at once?
- */
- ATH_TXBUF_LOCK(sc);
- ath_returnbuf_head(sc, bf);
- ATH_TXBUF_UNLOCK(sc);
- }
-}
-
-/*
- * Walk the TX buffer queue and call ath_tx_start() on each
- * of them.
- */
-static void
-ath_txq_qrun(struct ifnet *ifp)
-{
- struct ath_softc *sc = ifp->if_softc;
- ath_bufhead txlist;
- struct ath_buf *bf, *bf_next;
- struct ieee80211_node *ni;
- struct mbuf *m;
+ struct mbuf *m, *next;
+ ath_bufhead frags;
+ int npkts = 0;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
return;
- TAILQ_INIT(&txlist);
+ ATH_TX_LOCK_ASSERT(sc);
- /*
- * Grab the frames to transmit from the tx queue
- */
+ ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
- /* Copy everything out of sc_txbuf_list into txlist */
- ATH_TX_IC_LOCK(sc);
- TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
- ATH_TX_IC_UNLOCK(sc);
-
- /*
- * Attempt to transmit each frame.
- *
- * In the old code path - if a TX fragment fails, subsequent
- * fragments in that group would be aborted.
- *
- * It would be nice to chain together TX fragments in this
- * way so they can be aborted together.
- */
- ATH_TX_LOCK(sc);
- TAILQ_FOREACH_SAFE(bf, &txlist, bf_list, bf_next) {
+ for (;;) {
+ ATH_TXBUF_LOCK(sc);
+ if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
+ /* XXX increment counter? */
+ ATH_TXBUF_UNLOCK(sc);
+ IF_LOCK(&ifp->if_snd);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ IF_UNLOCK(&ifp->if_snd);
+ break;
+ }
+ ATH_TXBUF_UNLOCK(sc);
+
/*
- * Clear, because we're going to reuse this
- * as a real ath_buf now
+ * Grab a TX buffer and associated resources.
*/
- ni = bf->bf_node;
- m = bf->bf_m;
-
- bf->bf_node = NULL;
- bf->bf_m = NULL;
+ bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
+ if (bf == NULL)
+ break;
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL) {
+ ATH_TXBUF_LOCK(sc);
+ ath_returnbuf_head(sc, bf);
+ ATH_TXBUF_UNLOCK(sc);
+ break;
+ }
+ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
+ npkts ++;
/*
- * Remove it from the list.
+ * Check for fragmentation. If this frame
+ * has been broken up verify we have enough
+ * buffers to send all the fragments so all
+ * go out or none...
*/
- TAILQ_REMOVE(&txlist, bf, bf_list);
-
+ TAILQ_INIT(&frags);
+ if ((m->m_flags & M_FRAG) &&
+ !ath_txfrag_setup(sc, &frags, m, ni)) {
+ DPRINTF(sc, ATH_DEBUG_XMIT,
+ "%s: out of txfrag buffers\n", __func__);
+ sc->sc_stats.ast_tx_nofrag++;
+ ifp->if_oerrors++;
+ ath_freetx(m);
+ goto bad;
+ }
+ ifp->if_opackets++;
+ nextfrag:
/*
- * If we fail, free this buffer and go to the next one;
- * ath_tx_start() frees the mbuf but not the node
- * reference.
+ * Pass the frame to the h/w for transmission.
+ * Fragmented frames have each frag chained together
+ * with m_nextpkt. We know there are sufficient ath_buf's
+ * to send all the frags because of work done by
+ * ath_txfrag_setup. We leave m_nextpkt set while
+ * calling ath_tx_start so it can use it to extend the
+ * the tx duration to cover the subsequent frag and
+ * so it can reclaim all the mbufs in case of an error;
+ * ath_tx_start clears m_nextpkt once it commits to
+ * handing the frame to the hardware.
*/
+ next = m->m_nextpkt;
if (ath_tx_start(sc, ni, bf, m)) {
- /*
- * XXX m is freed by ath_tx_start(); node reference
- * is not!
- */
- DPRINTF(sc, ATH_DEBUG_XMIT,
- "%s: failed; bf=%p, ni=%p, m=%p\n",
- __func__,
- bf,
- ni,
- m);
+ bad:
ifp->if_oerrors++;
+ reclaim:
bf->bf_m = NULL;
bf->bf_node = NULL;
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, bf);
+ ath_txfrag_cleanup(sc, &frags, ni);
ATH_TXBUF_UNLOCK(sc);
/*
* XXX todo, free the node outside of
@@ -2937,86 +2650,38 @@ ath_txq_qrun(struct ifnet *ifp)
*/
if (ni != NULL)
ieee80211_free_node(ni);
- } else {
- /*
- * Check here if the node is in power save state.
- * XXX we should hold a node ref here, and release
- * it after the TX has completed.
- */
- ath_tx_update_tim(sc, ni, 1);
- ifp->if_opackets++;
+ continue;
}
/*
- * XXX should check for state change and flip out
- * if needed.
+ * Check here if the node is in power save state.
*/
- }
- ATH_TX_UNLOCK(sc);
+ ath_tx_update_tim(sc, ni, 1);
- /*
- * If we break out early (eg a state change) we should prepend these
- * frames onto the TX queue.
- */
-}
-
-/*
- * This is now primarily used by the net80211 layer to kick-start
- * queue processing.
- */
-void
-ath_start(struct ifnet *ifp)
-{
- struct mbuf *m;
- struct ath_softc *sc = ifp->if_softc;
- struct ieee80211_node *ni;
- int npkts = 0;
-
- ATH_TX_UNLOCK_ASSERT(sc);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
- return;
-
- /*
- * If we're below the free buffer limit, don't dequeue anything.
- * The original code would not dequeue anything from the queue
- * if allocating an ath_buf failed.
- *
- * For if_transmit, we have to either queue or drop the frame.
- * So we have to try and queue it _somewhere_.
- */
- for (;;) {
- IFQ_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL) {
- break;
+ if (next != NULL) {
+ /*
+ * Beware of state changing between frags.
+ * XXX check sta power-save state?
+ */
+ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
+ DPRINTF(sc, ATH_DEBUG_XMIT,
+ "%s: flush fragmented packet, state %s\n",
+ __func__,
+ ieee80211_state_name[ni->ni_vap->iv_state]);
+ ath_freetx(next);
+ goto reclaim;
+ }
+ m = next;
+ bf = TAILQ_FIRST(&frags);
+ KASSERT(bf != NULL, ("no buf for txfrag"));
+ TAILQ_REMOVE(&frags, bf, bf_list);
+ goto nextfrag;
}
- /*
- * If we do fail here, just break out for now
- * and wait until we've transmitted something
- * before we attempt again?
- */
- if (ath_txq_qadd(ifp, m) < 0) {
- DPRINTF(sc, ATH_DEBUG_XMIT,
- "%s: ath_txq_qadd failed\n",
- __func__);
- ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
- if (ni != NULL)
- ieee80211_free_node(ni);
- ath_freetx(m);
- break;
- }
- npkts++;
+ sc->sc_wd_timer = 5;
}
-
- /*
- * Kick the taskqueue into activity, but only if we
- * queued something.
- */
- if (npkts > 0)
- ath_tx_kick(sc);
+ ATH_KTR(sc, ATH_KTR_TX, 1, "ath_start: finished; npkts=%d", npkts);
}
-
static int
ath_media_change(struct ifnet *ifp)
{
@@ -3039,7 +2704,6 @@ ath_key_update_begin(struct ieee80211vap *vap)
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
taskqueue_block(sc->sc_tq);
- taskqueue_block(sc->sc_tx_tq);
IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
}
@@ -3052,7 +2716,6 @@ ath_key_update_end(struct ieee80211vap *vap)
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
IF_UNLOCK(&ifp->if_snd);
taskqueue_unblock(sc->sc_tq);
- taskqueue_unblock(sc->sc_tx_tq);
}
static void
@@ -4273,11 +3936,11 @@ ath_tx_proc_q0(void *arg, int npending)
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
- ath_txq_qrun(ifp);
-
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+
+ ath_tx_kick(sc);
}
/*
@@ -4326,11 +3989,11 @@ ath_tx_proc_q0123(void *arg, int npending)
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
- ath_txq_qrun(ifp);
-
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+
+ ath_tx_kick(sc);
}
/*
@@ -4371,11 +4034,11 @@ ath_tx_proc(void *arg, int npending)
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
- ath_txq_qrun(ifp);
-
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+
+ ath_tx_kick(sc);
}
#undef TXQACTIVE
@@ -4726,7 +4389,6 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
/* (Try to) stop TX/RX from occuring */
taskqueue_block(sc->sc_tq);
- taskqueue_block(sc->sc_tx_tq);
ATH_PCU_LOCK(sc);
ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */
@@ -5116,7 +4778,6 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
sc->sc_beacons = 0;
taskqueue_unblock(sc->sc_tq);
- taskqueue_unblock(sc->sc_tx_tq);
}
ni = ieee80211_ref_node(vap->iv_bss);
@@ -5282,7 +4943,6 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
"%s: calibration disabled\n", __func__);
}
taskqueue_unblock(sc->sc_tq);
- taskqueue_unblock(sc->sc_tx_tq);
} else if (nstate == IEEE80211_S_INIT) {
/*
* If there are no vaps left in RUN state then
@@ -5296,7 +4956,6 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
/* disable interrupts */
ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
taskqueue_block(sc->sc_tq);
- taskqueue_block(sc->sc_tx_tq);
sc->sc_beacons = 0;
}
#ifdef IEEE80211_SUPPORT_TDMA
diff --git a/sys/dev/ath/if_ath_misc.h b/sys/dev/ath/if_ath_misc.h
index f35fc11..167c826 100644
--- a/sys/dev/ath/if_ath_misc.h
+++ b/sys/dev/ath/if_ath_misc.h
@@ -127,7 +127,9 @@ static inline void
ath_tx_kick(struct ath_softc *sc)
{
- taskqueue_enqueue(sc->sc_tx_tq, &sc->sc_txpkttask);
+ ATH_TX_LOCK(sc);
+ ath_start(sc->sc_ifp);
+ ATH_TX_UNLOCK(sc);
}
/*
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index e19a637..64c4378 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -1124,11 +1124,7 @@ ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
dur = rt->info[rix].lpAckDuration;
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
dur += dur; /* additional SIFS+ACK */
- if (bf->bf_state.bfs_nextpktlen == 0) {
- device_printf(sc->sc_dev,
- "%s: next txfrag len=0?\n",
- __func__);
- }
+ KASSERT(bf->bf_m->m_nextpkt != NULL, ("no fragment"));
/*
* Include the size of next fragment so NAV is
* updated properly. The last fragment uses only
@@ -1139,7 +1135,7 @@ ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
* first fragment!
*/
dur += ath_hal_computetxtime(ah, rt,
- bf->bf_state.bfs_nextpktlen,
+ bf->bf_m->m_nextpkt->m_pkthdr.len,
rix, shortPreamble);
}
if (isfrag) {
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 52ed6d8..36923cc 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -279,8 +279,6 @@ struct ath_buf {
int32_t bfs_keyix; /* crypto key index */
int32_t bfs_txantenna; /* TX antenna config */
- uint16_t bfs_nextpktlen; /* length of next frag pkt */
-
/* Make this an 8 bit value? */
enum ieee80211_protmode bfs_protmode;
@@ -525,7 +523,6 @@ struct ath_softc {
struct mtx sc_tx_ic_mtx; /* TX queue mutex */
char sc_tx_ic_mtx_name[32];
struct taskqueue *sc_tq; /* private task queue */
- struct taskqueue *sc_tx_tq; /* private TX task queue */
struct ath_hal *sc_ah; /* Atheros HAL */
struct ath_ratectrl *sc_rc; /* tx rate control support */
struct ath_tx99 *sc_tx99; /* tx99 adjunct state */
@@ -815,6 +812,8 @@ struct ath_softc {
MA_OWNED)
#define ATH_TX_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_mtx, \
MA_NOTOWNED)
+#define ATH_TX_TRYLOCK(_sc) (mtx_owned(&(_sc)->sc_tx_mtx) != 0 && \
+ mtx_trylock(&(_sc)->sc_tx_mtx))
/*
* The IC TX lock is non-reentrant and serialises packet queuing from