@@ -234,7 +234,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 	/* Found a place for all queues - enable them */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-				      iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
+				      mvmsta->hw_queue[ac],
+				      iwl_mvm_ac_to_tx_fifo[ac], 0,
+				      wdg_timeout);
 		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
 	}
 
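A note on the reworked call sites above and below: judging purely from the arguments passed in this patch, the queue helpers now also take the mac80211 queue backing the hardware queue (here the same number is passed twice), plus a start sequence number on enable and a tid on disable. The prototypes below are inferred from the call sites, not copied from the headers (which this excerpt does not show):

void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                           u8 fifo, u16 ssn, unsigned int wdg_timeout);
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                         u8 tid, u8 flags);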
@@ -253,7 +255,7 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 	/* disable the TDLS STA-specific queues */
 	sta_msk = mvmsta->tfd_queue_msk;
 	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-		iwl_mvm_disable_txq(mvm, i, 0);
+		iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
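Both the TDLS teardown here and the drained-queue worker later walk a queue bitmask with for_each_set_bit. For readers unfamiliar with the kernel macro, here is a minimal userspace sketch of the same walk (illustrative names, not the driver's code):

#include <stdio.h>

/* Visit every set bit of a station's queue mask, as
 * for_each_set_bit(i, &sta_msk, ...) does in the hunk above. */
static void walk_queue_mask(unsigned long sta_msk)
{
        for (unsigned int i = 0; i < sizeof(sta_msk) * 8; i++)
                if (sta_msk & (1UL << i))
                        printf("disable hw queue %u\n", i);
}

int main(void)
{
        walk_queue_mask((1UL << 4) | (1UL << 7) | (1UL << 9));
        return 0;
}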
@@ -277,7 +279,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
 	if (vif->type == NL80211_IFTYPE_AP) {
 		mvmvif->ap_assoc_sta_count++;
-		iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, FW_CTXT_ACTION_MODIFY);
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 	}
 
 	spin_lock_init(&mvm_sta->lock);
@@ -292,7 +294,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
 	/* HW restart, don't assume the memory has been zeroed */
 	atomic_set(&mvm->pending_frames[sta_id], 0);
-	mvm_sta->tid_disable_agg = 0;
+	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
 	mvm_sta->tfd_queue_msk = 0;
 
 	/* allocate new queues for a TDLS station */
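The 0xffff here is one bit per TID: a set bit in tid_disable_agg means aggregation is disabled for that TID, so after a HW restart every TID starts out with aggregation off until a session is renegotiated. A small sketch of the bookkeeping, assuming 16 TIDs as in the driver (helper name hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TID_COUNT 16

/* A set bit means "aggregation disabled" for that TID. */
static bool agg_enabled(uint16_t tid_disable_agg, unsigned int tid)
{
        return !(tid_disable_agg & (1u << tid));
}

int main(void)
{
        uint16_t tid_disable_agg = 0xffff;      /* no aggs at first */

        tid_disable_agg &= ~(1u << 5);          /* agg session starts on TID 5 */
        for (unsigned int tid = 0; tid < MAX_TID_COUNT; tid++)
                printf("TID %2u: agg %s\n", tid,
                       agg_enabled(tid_disable_agg, tid) ? "on" : "off");
        return 0;
}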
@@ -472,7 +474,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 		unsigned long i, msk = mvm->tfd_drained[sta_id];
 
 		for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-			iwl_mvm_disable_txq(mvm, i, 0);
+			iwl_mvm_disable_txq(mvm, i, i, 0, 0);
 
 		mvm->tfd_drained[sta_id] = 0;
 		IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -651,8 +653,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Map Aux queue to fifo - needs to happen before adding Aux station */
-	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
-			      IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 
 	/* Allocate aux station and assign to it the aux queue */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -923,6 +925,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data;
 	int txq_id;
+	int ret;
 
 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 		return -EINVAL;
@@ -935,17 +938,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	lockdep_assert_held(&mvm->mutex);
 
-	for (txq_id = mvm->first_agg_queue;
-	     txq_id <= mvm->last_agg_queue; txq_id++)
-		if (mvm->queue_to_mac80211[txq_id] ==
-		    IWL_INVALID_MAC80211_QUEUE)
-			break;
-
-	if (txq_id > mvm->last_agg_queue) {
-		IWL_ERR(mvm, "Failed to allocate agg queue\n");
-		return -EIO;
-	}
-
 	spin_lock_bh(&mvmsta->lock);
 
 	/* possible race condition - we entered D0i3 while starting agg */
@@ -955,8 +947,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	/* the new tx queue is still connected to the same mac80211 queue */
-	mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+					 mvm->last_agg_queue);
+	if (txq_id < 0) {
+		ret = txq_id;
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Failed to allocate agg queue\n");
+		goto release_locks;
+	}
+	mvm->queue_info[txq_id].setup_reserved = true;
+	spin_unlock_bh(&mvm->queue_info_lock);
 
 	tid_data = &mvmsta->tid_data[tid];
 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
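This hunk is the heart of the change: the open-coded scan over queue_to_mac80211 removed by the previous hunk becomes iwl_mvm_find_free_queue, and the winning queue is marked setup_reserved before queue_info_lock is dropped, so two concurrent ADDBA flows cannot claim the same queue between "find" and "enable". A userspace model of the pattern, with a pthread mutex standing in for the spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FIRST_AGG_QUEUE 10
#define LAST_AGG_QUEUE  19

static struct {
        bool setup_reserved;
} queue_info[LAST_AGG_QUEUE + 1];

static pthread_mutex_t queue_info_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a free queue in the agg range and reserve it before dropping
 * the lock, mirroring find_free_queue + setup_reserved above. */
static int reserve_agg_queue(void)
{
        int txq_id = -1;

        pthread_mutex_lock(&queue_info_lock);
        for (int i = FIRST_AGG_QUEUE; i <= LAST_AGG_QUEUE; i++) {
                if (!queue_info[i].setup_reserved) {
                        queue_info[i].setup_reserved = true;
                        txq_id = i;
                        break;
                }
        }
        pthread_mutex_unlock(&queue_info_lock);
        return txq_id;  /* -1 when no agg queue is free */
}

int main(void)
{
        printf("reserved %d\n", reserve_agg_queue());
        printf("reserved %d\n", reserve_agg_queue());   /* next free queue */
        return 0;
}

The reservation only guards the setup window: iwl_mvm_sta_tx_agg_oper, _stop and _flush below all clear setup_reserved again once the queue is either fully configured or torn down.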
@@ -975,9 +977,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
 
+	ret = 0;
+
+release_locks:
 	spin_unlock_bh(&mvmsta->lock);
 
-	return 0;
+	return ret;
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
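The ret/release_locks rework gives the function the usual kernel single-exit shape: every path out of the locked region funnels through one label that drops mvmsta->lock. A compact illustration of the idiom (generic code, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int locked_op(int fail)
{
        int ret;

        pthread_mutex_lock(&lock);
        if (fail) {
                ret = -1;               /* error path: still one unlock site */
                goto release_locks;
        }
        /* ... work done under the lock ... */
        ret = 0;

release_locks:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d %d\n", locked_op(0), locked_op(1));
        return 0;
}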
@@ -1005,13 +1010,19 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-			       buf_size, ssn, wdg_timeout);
+	iwl_mvm_enable_agg_txq(mvm, queue,
+			       vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
+			       mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
 
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
 		return -EIO;
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/*
 	 * Even though in theory the peer could have different
 	 * aggregation reorder buffer sizes for different sessions,
@@ -1056,6 +1067,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
+	/* No need to mark as reserved anymore */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	switch (tid_data->state) {
 	case IWL_AGG_ON:
 		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -1073,14 +1089,15 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		tid_data->ssn = 0xffff;
 		tid_data->state = IWL_AGG_OFF;
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 		spin_unlock_bh(&mvmsta->lock);
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, txq_id, 0);
+		iwl_mvm_disable_txq(mvm, txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1091,7 +1108,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		/* No barriers since we are under mutex */
 		lockdep_assert_held(&mvm->mutex);
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		tid_data->state = IWL_AGG_OFF;
@@ -1132,6 +1148,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
 		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
@@ -1142,12 +1163,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
+		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 	}
 
-	mvm->queue_to_mac80211[tid_data->txq_id] =
-		IWL_INVALID_MAC80211_QUEUE;
-
 	return 0;
 }
 