author		Arik Nemtsov <arik@wizery.com>	2014-03-13 12:21:50 +0200
committer	Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2014-03-16 13:45:32 +0200
commit		b2492501d234ef7a99613576550126b88b377070 (patch)
tree		080c906b8dbe463463aa095c3be82810e4356fb3 /drivers/net/wireless/iwlwifi/mvm/ops.c
parent		1a95c8df7ed1ddf5e1d732a594f5a1b09da9a8c5 (diff)
iwlwifi: mvm: reconfigure qos seq on D0i3 exit
In order to restore the qos seq number on d0i3 exit, we need to read it from the wowlan status. However, in order to make sure we use the correct seq num for tx frames, we need to defer any outgoing frames and re-enqueue them only after the seq num is configured correctly.

Sync new Tx aggregations with D0i3 so that the correct seq num is used for them. Wait synchronously for D0i3 exit before starting a new Tx agg.

Signed-off-by: Eliad Peller <eliadx.peller@intel.com>
Signed-off-by: Arik Nemtsov <arikx.nemtsov@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
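Note: the deferral itself, i.e. parking outgoing frames on d0i3_tx while the IWL_MVM_STATUS_IN_D0I3 bit is set, happens on the Tx path and is outside this file's diff; ops.c only adds the queue, its lock, the wait queue, and the flush on exit. The following is a purely illustrative user-space sketch of the park-then-flush pattern the message describes; the names and structure are assumptions, not the driver's actual Tx entry points.

/* Illustrative only: defer frames while "D0i3" is in progress, fix up
 * sequence numbers on exit, then flush whatever was parked. */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 8

struct frame { int seq; };

static bool in_d0i3;                        /* stands in for IWL_MVM_STATUS_IN_D0I3 */
static struct frame deferred[QUEUE_MAX];    /* stands in for the d0i3_tx skb queue  */
static int n_deferred;

/* Tx entry point: park the frame while D0i3 is in progress, send otherwise. */
static void tx_frame(struct frame f)
{
	if (in_d0i3 && n_deferred < QUEUE_MAX) {
		deferred[n_deferred++] = f;   /* held until the qos seq is restored */
		return;
	}
	printf("tx seq=%d\n", f.seq);
}

/* D0i3 exit: restore sequence numbers, then re-enqueue everything that was parked. */
static void d0i3_exit(int restored_seq)
{
	in_d0i3 = false;
	for (int i = 0; i < n_deferred; i++) {
		deferred[i].seq = restored_seq++;
		printf("tx (re-enqueued) seq=%d\n", deferred[i].seq);
	}
	n_deferred = 0;
}

int main(void)
{
	in_d0i3 = true;
	tx_frame((struct frame){ .seq = 0 });
	tx_frame((struct frame){ .seq = 0 });
	d0i3_exit(42);
	return 0;
}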
Diffstat (limited to 'drivers/net/wireless/iwlwifi/mvm/ops.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/ops.c	| 132
1 file changed, 132 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 87c32e8..a3e21f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -410,6 +410,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+ spin_lock_init(&mvm->d0i3_tx_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
+
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
@@ -823,8 +827,62 @@ struct iwl_d0i3_iter_data {
struct iwl_mvm *mvm;
u8 ap_sta_id;
u8 vif_count;
+ u8 offloading_tid;
+ bool disable_offloading;
};
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d0i3_iter_data *iter_data)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 available_tids = 0;
+ u8 tid;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+ mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ return false;
+
+ ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (IS_ERR_OR_NULL(ap_sta))
+ return false;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ /*
+ * in case of pending tx packets, don't use this tid
+ * for offloading in order to prevent reuse of the same
+ * qos seq counters.
+ */
+ if (iwl_mvm_tid_queued(tid_data))
+ continue;
+
+ if (tid_data->state != IWL_AGG_OFF)
+ continue;
+
+ available_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ /*
+ * disallow protocol offloading if we have no available tid
+ * (with no pending frames and no active aggregation,
+ * as we don't handle "holes" properly - the scheduler needs the
+ * frame's seq number and TFD index to match)
+ */
+ if (!available_tids)
+ return true;
+
+ /* for simplicity, just use the first available tid */
+ iter_data->offloading_tid = ffs(available_tids) - 1;
+ return false;
+}
+
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
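In iwl_mvm_disallow_offloading() above, ffs(available_tids) - 1 picks the lowest-numbered TID that has no queued frames and no active aggregation: ffs() returns the 1-based position of the least-significant set bit (and 0 for an empty mask, a case the earlier !available_tids check already rules out). A standalone illustration of that selection in plain POSIX C, not driver code:

#include <stdio.h>
#include <strings.h>   /* POSIX ffs() */

int main(void)
{
	unsigned int available_tids = 0;

	available_tids |= 1u << 3;   /* TIDs 3 and 5 have no pending frames */
	available_tids |= 1u << 5;   /* and no active aggregation           */

	/* ffs() is 1-based (0 for an empty mask), hence the "- 1". */
	if (available_tids)
		printf("offloading_tid = %d\n", ffs(available_tids) - 1);  /* -> 3 */
	else
		printf("no TID available, protocol offloading disallowed\n");

	return 0;
}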
@@ -838,6 +896,14 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
!vif->bss_conf.assoc)
return;
+ /*
+ * in case of pending tx packets or active aggregations,
+ * avoid offloading features in order to prevent reuse of
+ * the same qos seq counters.
+ */
+ if (iwl_mvm_disallow_offloading(mvm, vif, data))
+ data->disable_offloading = true;
+
iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
/*
@@ -868,6 +934,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+ cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
@@ -900,15 +967,21 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+ /* make sure we have no running tx while configuring the qos */
+ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ synchronize_net();
+
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
&d0i3_iter_data);
if (d0i3_iter_data.vif_count == 1) {
mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ mvm->d0i3_offloading = false;
}
iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
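The set_bit(IWL_MVM_STATUS_IN_D0I3, ...) followed by synchronize_net() above closes the Tx path before the qos configuration is touched; per the commit message, new Tx aggregations then wait synchronously until iwl_mvm_d0i3_enable_tx() (in the next hunk) clears the bit and wakes d0i3_exit_waitq. The waiting side lives in the Tx-aggregation start path, outside this file, so the following is only a rough user-space analogue of that handshake, with a pthread condition variable standing in for the kernel wait queue and a plain flag standing in for the status bit:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t d0i3_exit_waitq = PTHREAD_COND_INITIALIZER;
static bool in_d0i3 = true;           /* stands in for IWL_MVM_STATUS_IN_D0I3 */

/* Tx-agg start: block until D0i3 exit has restored the qos seq numbers. */
static void *start_tx_agg(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (in_d0i3)
		pthread_cond_wait(&d0i3_exit_waitq, &lock);
	pthread_mutex_unlock(&lock);
	puts("starting Tx aggregation with a valid seq number");
	return NULL;
}

int main(void)
{
	pthread_t agg;

	pthread_create(&agg, NULL, start_tx_agg, NULL);

	/* D0i3 exit path: clear the flag, then wake every waiter. */
	pthread_mutex_lock(&lock);
	in_d0i3 = false;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&d0i3_exit_waitq);

	pthread_join(agg, NULL);
	return 0;
}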
@@ -948,6 +1021,62 @@ static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
ieee80211_connection_loss(vif);
}
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ int i;
+ bool wake_queues = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->d0i3_tx_lock);
+
+ if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+ goto out;
+
+ IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+ /* get the sta in order to update seq numbers and re-enqueue skbs */
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (IS_ERR_OR_NULL(sta)) {
+ sta = NULL;
+ goto out;
+ }
+
+ if (mvm->d0i3_offloading && qos_seq) {
+ /* update qos seq numbers if offloading was enabled */
+ mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = le16_to_cpu(qos_seq[i]);
+ /* firmware stores last-used one, we store next one */
+ seq += 0x10;
+ mvm_ap_sta->tid_data[i].seq_number = seq;
+ }
+ }
+out:
+ /* re-enqueue (or drop) all packets */
+ while (!skb_queue_empty(&mvm->d0i3_tx)) {
+ struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+ if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+
+ /* if the skb_queue is not empty, we need to wake queues */
+ wake_queues = true;
+ }
+ clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+ wake_up(&mvm->d0i3_exit_waitq);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ if (wake_queues)
+ ieee80211_wake_queues(mvm->hw);
+
+ spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
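The seq += 0x10 in iwl_mvm_d0i3_enable_tx() works because the per-TID seq_number is kept in IEEE 802.11 Sequence Control layout (sequence number in bits 4..15, fragment number in bits 0..3), so adding 0x10 advances the sequence number by one past the last value the firmware reported. A small standalone sketch of that arithmetic (assumed layout only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Extract the 12-bit sequence number from a Sequence Control value. */
static unsigned seq_ctrl_to_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl & 0xFFF0u) >> 4;     /* drop the fragment-number bits */
}

int main(void)
{
	uint16_t last_used = 0x0150;          /* firmware's last-used value: SN 21 */
	uint16_t next = last_used + 0x10;     /* what the driver should use next   */

	printf("last used SN = %u, next SN = %u\n",
	       seq_ctrl_to_sn(last_used), seq_ctrl_to_sn(next));   /* 21 -> 22 */
	return 0;
}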
@@ -958,6 +1087,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
struct iwl_wowlan_status_v6 *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
+ __le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
@@ -969,6 +1099,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+ qos_seq = status->qos_seq_ctr;
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
@@ -982,6 +1113,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
+ iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
mutex_unlock(&mvm->mutex);
}