author		Johannes Berg <johannes.berg@intel.com>	2012-12-28 09:43:03 +0100
committer	Johannes Berg <johannes.berg@intel.com>	2013-01-03 13:01:42 +0100
commit		30bf5f1f433c7612857ed13c50525945c483dfe0 (patch)
tree		a9dbe1536cf42c42bd03e232c632ab18140dae86	/net/mac80211/agg-tx.c
parent		faec12ee2dd92edc09f75aab3d8c5085102052c5 (diff)
download	op-kernel-dev-30bf5f1f433c7612857ed13c50525945c483dfe0.zip
		op-kernel-dev-30bf5f1f433c7612857ed13c50525945c483dfe0.tar.gz
mac80211: move ieee80211_remove_tid_tx function

To call it from ___ieee80211_stop_tx_ba_session, move the function
and dependencies up.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/agg-tx.c')
-rw-r--r--	net/mac80211/agg-tx.c	206
1 file changed, 103 insertions(+), 103 deletions(-)
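For context on why the definition moves rather than just gaining a new caller: a function must be declared before the point where it is called (the kernel builds warn on implicit declarations), and ___ieee80211_stop_tx_ba_session sits earlier in agg-tx.c than ieee80211_remove_tid_tx did. Moving the function and the splice helpers it depends on above that caller avoids adding forward declarations. A minimal sketch of the ordering constraint, using hypothetical names rather than the kernel code:

    /* Definition placed above its caller, so no forward declaration is needed. */
    static void helper(void)
    {
            /* work shared by several code paths */
    }

    static int caller(void)
    {
            helper();   /* visible here only because the definition precedes it */
            return 0;
    }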
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 101bbfb..dda8d7d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,6 +149,109 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}
+static inline int ieee80211_ac_from_tid(int tid)
+{
+ return ieee802_1d_to_ac[tid & 7];
+}
+
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+ if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
+ ieee80211_stop_queue_by_reason(
+ &sdata->local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+ if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
+ ieee80211_wake_queue_by_reason(
+ &sdata->local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ __release(agg_queue);
+}
+
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+ struct ieee80211_local *local = sdata->local;
+ int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+ unsigned long flags;
+
+ ieee80211_stop_queue_agg(sdata, tid);
+
+ if (WARN(!tid_tx,
+ "TID %d gone but expected when splicing aggregates from the pending queue\n",
+ tid))
+ return;
+
+ if (!skb_queue_empty(&tid_tx->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
+{
+ ieee80211_wake_queue_agg(sdata, tid);
+}
+
+static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+ lockdep_assert_held(&sta->lock);
+
+ tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+ /*
+ * When we get here, the TX path will not be lockless any more wrt.
+ * aggregation, since the OPERATIONAL bit has long been cleared.
+ * Thus it will block on getting the lock, if it occurs. So if we
+ * stop the queue now, we will not get any more packets, and any
+ * that might be being processed will wait for us here, thereby
+ * guaranteeing that no packets go to the tid_tx pending queue any
+ * more.
+ */
+
+ ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
+
+ /* future packets must not find the tid_tx struct any more */
+ ieee80211_assign_tid_tx(sta, tid, NULL);
+
+ ieee80211_agg_splice_finish(sta->sdata, tid);
+
+ kfree_rcu(tid_tx, rcu_head);
+}
+
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_agg_stop_reason reason)
{
@@ -265,80 +368,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
rcu_read_unlock();
}
-static inline int ieee80211_ac_from_tid(int tid)
-{
- return ieee802_1d_to_ac[tid & 7];
-}
-
-/*
- * When multiple aggregation sessions on multiple stations
- * are being created/destroyed simultaneously, we need to
- * refcount the global queue stop caused by that in order
- * to not get into a situation where one of the aggregation
- * setup or teardown re-enables queues before the other is
- * ready to handle that.
- *
- * These two functions take care of this issue by keeping
- * a global "agg_queue_stop" refcount.
- */
-static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
- int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
- if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
- ieee80211_stop_queue_by_reason(
- &sdata->local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- __acquire(agg_queue);
-}
-
-static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
- int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
- if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
- ieee80211_wake_queue_by_reason(
- &sdata->local->hw, queue,
- IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
- __release(agg_queue);
-}
-
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
- struct tid_ampdu_tx *tid_tx, u16 tid)
-{
- struct ieee80211_local *local = sdata->local;
- int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
- unsigned long flags;
-
- ieee80211_stop_queue_agg(sdata, tid);
-
- if (WARN(!tid_tx,
- "TID %d gone but expected when splicing aggregates from the pending queue\n",
- tid))
- return;
-
- if (!skb_queue_empty(&tid_tx->pending)) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* copy over remaining packets */
- skb_queue_splice_tail_init(&tid_tx->pending,
- &local->pending[queue]);
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
-{
- ieee80211_wake_queue_agg(sdata, tid);
-}
-
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
@@ -713,35 +742,6 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
-static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
-{
- struct tid_ampdu_tx *tid_tx;
-
- lockdep_assert_held(&sta->ampdu_mlme.mtx);
- lockdep_assert_held(&sta->lock);
-
- tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
- /*
- * When we get here, the TX path will not be lockless any more wrt.
- * aggregation, since the OPERATIONAL bit has long been cleared.
- * Thus it will block on getting the lock, if it occurs. So if we
- * stop the queue now, we will not get any more packets, and any
- * that might be being processed will wait for us here, thereby
- * guaranteeing that no packets go to the tid_tx pending queue any
- * more.
- */
-
- ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
-
- /* future packets must not find the tid_tx struct any more */
- ieee80211_assign_tid_tx(sta, tid, NULL);
-
- ieee80211_agg_splice_finish(sta->sdata, tid);
-
- kfree_rcu(tid_tx, rcu_head);
-}
-
void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
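The comment block in the moved hunk describes a refcounted queue stop: the first aggregation session that needs the queue stopped actually stops it, later sessions only bump the counter, and the queue is woken again only when the last session drops its reference. A minimal standalone sketch of that pattern, with hypothetical stop/wake stubs standing in for the mac80211 internals (not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real hardware queue stop/wake hooks. */
    static void hw_stop_queue(int queue) { printf("stop queue %d\n", queue); }
    static void hw_wake_queue(int queue) { printf("wake queue %d\n", queue); }

    /* One refcount per hardware queue, zero-initialized at file scope. */
    static atomic_int agg_queue_stop[4];

    /* Stop the queue only when the first user takes a reference. */
    static void agg_stop(int queue)
    {
            if (atomic_fetch_add(&agg_queue_stop[queue], 1) + 1 == 1)
                    hw_stop_queue(queue);
    }

    /* Wake the queue only when the last user drops its reference. */
    static void agg_wake(int queue)
    {
            if (atomic_fetch_sub(&agg_queue_stop[queue], 1) - 1 == 0)
                    hw_wake_queue(queue);
    }

    int main(void)
    {
            agg_stop(0);  /* first session: queue actually stops        */
            agg_stop(0);  /* second session: refcount bumped only       */
            agg_wake(0);  /* one user remains: queue stays stopped      */
            agg_wake(0);  /* last user gone: queue wakes up             */
            return 0;
    }

This mirrors the atomic_inc_return()/atomic_dec_return() checks in ieee80211_stop_queue_agg() and ieee80211_wake_queue_agg() above, so overlapping setup and teardown on different stations cannot re-enable a queue while another session still expects it to be stopped.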