@@ -284,60 +284,6 @@ unlock:
 	rcu_read_unlock();
 }
 
-static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
-				 struct ieee80211_sta *sta)
-{
-	unsigned long used_hw_queues;
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	unsigned int wdg_timeout =
-		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
-	u32 ac;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
-
-	/* Find available queues, and allocate them to the ACs */
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-		u8 queue = find_first_zero_bit(&used_hw_queues,
-					       mvm->first_agg_queue);
-
-		if (queue >= mvm->first_agg_queue) {
-			IWL_ERR(mvm, "Failed to allocate STA queue\n");
-			return -EBUSY;
-		}
-
-		__set_bit(queue, &used_hw_queues);
-		mvmsta->hw_queue[ac] = queue;
-	}
-
-	/* Found a place for all queues - enable them */
-	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-				      mvmsta->hw_queue[ac],
-				      iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), 0,
-				      wdg_timeout);
-		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
-	}
-
-	return 0;
-}
-
-static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
-				    struct ieee80211_sta *sta)
-{
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	unsigned long sta_msk;
-	int i;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/* disable the TDLS STA-specific queues */
-	sta_msk = mvmsta->tfd_queue_msk;
-	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
-}
-
 /* Disable aggregations for a bitmap of TIDs for a given station */
 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
 					unsigned long disable_agg_tids,
@@ -1317,8 +1263,6 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
 		}
 	}
-
-	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -1343,9 +1287,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
 	spin_lock_init(&mvm_sta->lock);
 
-	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
-	if (iwl_mvm_is_dqa_supported(mvm) &&
-	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+	/* if this is a HW restart re-alloc existing queues */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
 		goto update_fw;
 	}
@@ -1363,33 +1306,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
 
 	/* HW restart, don't assume the memory has been zeroed */
-	atomic_set(&mvm->pending_frames[sta_id], 0);
 	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
 	mvm_sta->tfd_queue_msk = 0;
 
-	/*
-	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
-	 * and then they'll be allocated dynamically
-	 */
-	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
-		ret = iwl_mvm_tdls_sta_init(mvm, sta);
-		if (ret)
-			return ret;
-	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
-		for (i = 0; i < IEEE80211_NUM_ACS; i++)
-			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
-				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
-	}
-
 	/* for HW restart - reset everything but the sequence number */
 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		u16 seq = mvm_sta->tid_data[i].seq_number;
 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
 		mvm_sta->tid_data[i].seq_number = seq;
 
-		if (!iwl_mvm_is_dqa_supported(mvm))
-			continue;
-
 		/*
 		 * Mark all queues for this STA as unallocated and defer TX
 		 * frames until the queue is allocated
@@ -1423,7 +1348,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		mvm_sta->dup_data = dup_data;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
 		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
 						 ieee80211_vif_type_p2p(vif));
 		if (ret)
@@ -1449,8 +1374,6 @@ update_fw:
 	return 0;
 
 err:
-	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
-		iwl_mvm_tdls_sta_deinit(mvm, sta);
 	return ret;
 }
 
@@ -1523,79 +1446,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
 	return 0;
 }
 
-void iwl_mvm_sta_drained_wk(struct work_struct *wk)
-{
-	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
-	u8 sta_id;
-
-	/*
-	 * The mutex is needed because of the SYNC cmd, but not only: if the
-	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
-	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
-	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
-	 * that later.
-	 */
-	mutex_lock(&mvm->mutex);
-
-	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
-		int ret;
-		struct ieee80211_sta *sta =
-			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-						  lockdep_is_held(&mvm->mutex));
-
-		/*
-		 * This station is in use or RCU-removed; the latter happens in
-		 * managed mode, where mac80211 removes the station before we
-		 * can remove it from firmware (we can only do that after the
-		 * MAC is marked unassociated), and possibly while the deauth
-		 * frame to disconnect from the AP is still queued. Then, the
-		 * station pointer is -ENOENT when the last skb is reclaimed.
-		 */
-		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
-			continue;
-
-		if (PTR_ERR(sta) == -EINVAL) {
-			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
-				sta_id);
-			continue;
-		}
-
-		if (!sta) {
-			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
-				sta_id);
-			continue;
-		}
-
-		WARN_ON(PTR_ERR(sta) != -EBUSY);
-		/* This station was removed and we waited until it got drained,
-		 * we can now proceed and remove it.
-		 */
-		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
-		if (ret) {
-			IWL_ERR(mvm,
-				"Couldn't remove sta %d after it was drained\n",
-				sta_id);
-			continue;
-		}
-		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
-		clear_bit(sta_id, mvm->sta_drained);
-
-		if (mvm->tfd_drained[sta_id]) {
-			unsigned long i, msk = mvm->tfd_drained[sta_id];
-
-			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-				iwl_mvm_disable_txq(mvm, i, i,
-						    IWL_MAX_TID_COUNT, 0);
-
-			mvm->tfd_drained[sta_id] = 0;
-			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
-				       sta_id, msk);
-		}
-	}
-
-	mutex_unlock(&mvm->mutex);
-}
-
 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 				       struct ieee80211_vif *vif,
 				       struct iwl_mvm_sta *mvm_sta)
@@ -1654,79 +1504,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	if (iwl_mvm_has_new_rx_api(mvm))
 		kfree(mvm_sta->dup_data);
 
-	if ((vif->type == NL80211_IFTYPE_STATION &&
-	     mvmvif->ap_sta_id == sta_id) ||
-	    iwl_mvm_is_dqa_supported(mvm)){
-		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
-		if (ret)
-			return ret;
-		/* flush its queues here since we are freeing mvm_sta */
-		ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
-		if (ret)
-			return ret;
-		if (iwl_mvm_has_new_tx_api(mvm)) {
-			ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
-		} else {
-			u32 q_mask = mvm_sta->tfd_queue_msk;
+	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+	if (ret)
+		return ret;
 
-			ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
-							     q_mask);
-		}
-		if (ret)
-			return ret;
-		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
-
-		/* If DQA is supported - the queues can be disabled now */
-		if (iwl_mvm_is_dqa_supported(mvm)) {
-			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
-			/*
-			 * If pending_frames is set at this point - it must be
-			 * driver internal logic error, since queues are empty
-			 * and removed successuly.
-			 * warn on it but set it to 0 anyway to avoid station
-			 * not being removed later in the function
-			 */
-			WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
-		}
+	/* flush its queues here since we are freeing mvm_sta */
+	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+	if (ret)
+		return ret;
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+	} else {
+		u32 q_mask = mvm_sta->tfd_queue_msk;
 
-		/* If there is a TXQ still marked as reserved - free it */
-		if (iwl_mvm_is_dqa_supported(mvm) &&
-		    mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
-			u8 reserved_txq = mvm_sta->reserved_queue;
-			enum iwl_mvm_queue_status *status;
-
-			/*
-			 * If no traffic has gone through the reserved TXQ - it
-			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
-			 * should be manually marked as free again
-			 */
-			spin_lock_bh(&mvm->queue_info_lock);
-			status = &mvm->queue_info[reserved_txq].status;
-			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
-				 (*status != IWL_MVM_QUEUE_FREE),
-				 "sta_id %d reserved txq %d status %d",
-				 sta_id, reserved_txq, *status)) {
-				spin_unlock_bh(&mvm->queue_info_lock);
-				return -EINVAL;
-			}
+		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+						     q_mask);
+	}
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
-			*status = IWL_MVM_QUEUE_FREE;
+	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+	/* If there is a TXQ still marked as reserved - free it */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+		u8 reserved_txq = mvm_sta->reserved_queue;
+		enum iwl_mvm_queue_status *status;
+
+		/*
+		 * If no traffic has gone through the reserved TXQ - it
+		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
+		 * should be manually marked as free again
+		 */
+		spin_lock_bh(&mvm->queue_info_lock);
+		status = &mvm->queue_info[reserved_txq].status;
+		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+			 (*status != IWL_MVM_QUEUE_FREE),
+			 "sta_id %d reserved txq %d status %d",
+			 sta_id, reserved_txq, *status)) {
 			spin_unlock_bh(&mvm->queue_info_lock);
+			return -EINVAL;
 		}
 
-		if (vif->type == NL80211_IFTYPE_STATION &&
-		    mvmvif->ap_sta_id == sta_id) {
-			/* if associated - we can't remove the AP STA now */
-			if (vif->bss_conf.assoc)
-				return ret;
+		*status = IWL_MVM_QUEUE_FREE;
+		spin_unlock_bh(&mvm->queue_info_lock);
+	}
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    mvmvif->ap_sta_id == sta_id) {
+		/* if associated - we can't remove the AP STA now */
+		if (vif->bss_conf.assoc)
+			return ret;
 
-			/* unassoc - go ahead - remove the AP STA now */
-			mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+		/* unassoc - go ahead - remove the AP STA now */
+		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 
-			/* clear d0i3_ap_sta_id if no longer relevant */
-			if (mvm->d0i3_ap_sta_id == sta_id)
-				mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
-		}
+		/* clear d0i3_ap_sta_id if no longer relevant */
+		if (mvm->d0i3_ap_sta_id == sta_id)
+			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
 	}
 
 	/*
@@ -1743,32 +1579,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	 * calls the drain worker.
 	 */
 	spin_lock_bh(&mvm_sta->lock);
+	spin_unlock_bh(&mvm_sta->lock);
 
-	/*
-	 * There are frames pending on the AC queues for this station.
-	 * We need to wait until all the frames are drained...
-	 */
-	if (atomic_read(&mvm->pending_frames[sta_id])) {
-		rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
-				   ERR_PTR(-EBUSY));
-		spin_unlock_bh(&mvm_sta->lock);
-
-		/* disable TDLS sta queues on drain complete */
-		if (sta->tdls) {
-			mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
-			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
-		}
-
-		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
-	} else {
-		spin_unlock_bh(&mvm_sta->lock);
-
-		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
-			iwl_mvm_tdls_sta_deinit(mvm, sta);
-
-		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
-		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
-	}
+	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
 
 	return ret;
 }
@@ -1867,7 +1681,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
 					      IWL_MAX_TID_COUNT,
 					      wdg_timeout);
 		mvm->aux_queue = queue;
-	} else if (iwl_mvm_is_dqa_supported(mvm)) {
+	} else {
 		struct iwl_trans_txq_scd_cfg cfg = {
 			.fifo = IWL_MVM_TX_FIFO_MCAST,
 			.sta_id = mvm->aux_sta.sta_id,
@@ -1878,9 +1692,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
 
 		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
 				   wdg_timeout);
-	} else {
-		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
-				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 	}
 }
 
@@ -1980,7 +1791,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
 		if (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_ADHOC)
 			queue = mvm->probe_queue;
@@ -2066,8 +1877,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (iwl_mvm_is_dqa_supported(mvm))
-		iwl_mvm_free_bcast_sta_queues(mvm, vif);
+	iwl_mvm_free_bcast_sta_queues(mvm, vif);
 
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
 	if (ret)
@@ -2078,23 +1888,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 qmask = 0;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (!iwl_mvm_is_dqa_supported(mvm)) {
-		qmask = iwl_mvm_mac_get_queues_mask(vif);
-
-		/*
-		 * The firmware defines the TFD queue mask to only be relevant
-		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
-		 * be included. This only happens in NL80211_IFTYPE_AP vif type,
-		 * so the next line will only have an effect there.
-		 */
-		qmask &= ~BIT(vif->cab_queue);
-	}
-
-	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
+	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
 					ieee80211_vif_type_p2p(vif),
 					IWL_STA_GENERAL_PURPOSE);
 }
@@ -2176,9 +1973,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (!iwl_mvm_is_dqa_supported(mvm))
-		return 0;
-
 	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
 		    vif->type != NL80211_IFTYPE_ADHOC))
 		return -ENOTSUPP;
@@ -2243,9 +2037,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (!iwl_mvm_is_dqa_supported(mvm))
-		return 0;
-
 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
 
 	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
@@ -2495,8 +2286,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		mvm_sta->tid_disable_agg &= ~BIT(tid);
 	} else {
 		/* In DQA-mode the queue isn't removed on agg termination */
-		if (!iwl_mvm_is_dqa_supported(mvm))
-			mvm_sta->tfd_queue_msk &= ~BIT(queue);
 		mvm_sta->tid_disable_agg |= BIT(tid);
 	}
 
@@ -2599,19 +2388,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			ret = -ENXIO;
 			goto release_locks;
 		}
-	} else if (iwl_mvm_is_dqa_supported(mvm) &&
-		   unlikely(mvm->queue_info[txq_id].status ==
+	} else if (unlikely(mvm->queue_info[txq_id].status ==
 			    IWL_MVM_QUEUE_SHARED)) {
 		ret = -ENXIO;
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can't start tid %d agg on shared queue!\n",
 				    tid);
 		goto release_locks;
-	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
-		   mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+	} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
-						 mvm->first_agg_queue,
-						 mvm->last_agg_queue);
+						 IWL_MVM_DQA_MIN_DATA_QUEUE,
+						 IWL_MVM_DQA_MAX_DATA_QUEUE);
 		if (txq_id < 0) {
 			ret = txq_id;
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
@@ -2729,37 +2516,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	queue_status = mvm->queue_info[queue].status;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
-	/* In DQA mode, the existing queue might need to be reconfigured */
-	if (iwl_mvm_is_dqa_supported(mvm)) {
-		/* Maybe there is no need to even alloc a queue... */
-		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
-			alloc_queue = false;
+	/* Maybe there is no need to even alloc a queue... */
+	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+		alloc_queue = false;
 
+	/*
+	 * Only reconfig the SCD for the queue if the window size has
+	 * changed from current (become smaller)
+	 */
+	if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
 		/*
-		 * Only reconfig the SCD for the queue if the window size has
-		 * changed from current (become smaller)
+		 * If reconfiguring an existing queue, it first must be
+		 * drained
 		 */
-		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
-			/*
-			 * If reconfiguring an existing queue, it first must be
-			 * drained
-			 */
-			ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
-							     BIT(queue));
-			if (ret) {
-				IWL_ERR(mvm,
-					"Error draining queue before reconfig\n");
-				return ret;
-			}
+		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+						     BIT(queue));
+		if (ret) {
+			IWL_ERR(mvm,
+				"Error draining queue before reconfig\n");
+			return ret;
+		}
 
-			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
-						   mvmsta->sta_id, tid,
-						   buf_size, ssn);
-			if (ret) {
-				IWL_ERR(mvm,
-					"Error reconfiguring TXQ #%d\n", queue);
-				return ret;
-			}
+		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+					   mvmsta->sta_id, tid,
+					   buf_size, ssn);
+		if (ret) {
+			IWL_ERR(mvm,
+				"Error reconfiguring TXQ #%d\n", queue);
+			return ret;
 		}
 	}
 
@@ -2855,18 +2639,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				    "ssn = %d, next_recl = %d\n",
 				    tid_data->ssn, tid_data->next_reclaimed);
 
-		/*
-		 * There are still packets for this RA / TID in the HW.
-		 * Not relevant for DQA mode, since there is no need to disable
-		 * the queue.
-		 */
-		if (!iwl_mvm_is_dqa_supported(mvm) &&
-		    tid_data->ssn != tid_data->next_reclaimed) {
-			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
-			err = 0;
-			break;
-		}
-
 		tid_data->ssn = 0xffff;
 		tid_data->state = IWL_AGG_OFF;
 		spin_unlock_bh(&mvmsta->lock);
@@ -2874,12 +2646,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
-
-		if (!iwl_mvm_is_dqa_supported(mvm)) {
-			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
-
-			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
-		}
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -2949,13 +2715,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		iwl_mvm_drain_sta(mvm, mvmsta, false);
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
-
-		if (!iwl_mvm_is_dqa_supported(mvm)) {
-			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
-
-			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
-					    tid, 0);
-		}
 	}
 
 	return 0;
@@ -3574,15 +3333,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 			u16 n_queued;
 
 			tid_data = &mvmsta->tid_data[tid];
-			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
-				 tid_data->state != IWL_AGG_ON &&
-				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
-				 "TID %d state is %d\n",
-				 tid, tid_data->state)) {
-				spin_unlock_bh(&mvmsta->lock);
-				ieee80211_sta_eosp(sta);
-				return;
-			}
 
 			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
 			if (n_queued > remaining) {
@@ -3676,13 +3426,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
 
 	mvm_sta->disable_tx = disable;
 
-	/*
-	 * Tell mac80211 to start/stop queuing tx for this station,
-	 * but don't stop queuing if there are still pending frames
-	 * for this station.
-	 */
-	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
-		ieee80211_sta_block_awake(mvm->hw, sta, disable);
+	/* Tell mac80211 to start/stop queuing tx for this station */
+	ieee80211_sta_block_awake(mvm->hw, sta, disable);
 
 	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
 