@@ -1695,7 +1695,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
 			     u32 qmask, enum nl80211_iftype iftype,
 			     enum iwl_sta_type type)
 {
-	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    sta->sta_id == IWL_MVM_INVALID_STA) {
 		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
 		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
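This first hunk changes when iwl_mvm_allocate_int_sta() asks for a fresh station ID: a normal start always allocates one, and a HW restart now also allocates one if the station never had a valid ID, instead of reusing a stale value. Below is a minimal standalone sketch of that rule; the types and the find_free_sta_id() helper are stubs for illustration, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_STA 0xff /* stands in for IWL_MVM_INVALID_STA */

/* Hypothetical allocator: returns INVALID_STA when the table is full. */
static unsigned char find_free_sta_id(void)
{
	static unsigned char next;
	return next < 16 ? next++ : INVALID_STA;
}

/*
 * Mirrors the patched condition: allocate a new ID on a normal start,
 * or on HW restart when no valid ID was ever assigned; otherwise keep
 * the ID the station already had before the restart.
 */
static int allocate_int_sta(bool in_hw_restart, unsigned char *sta_id)
{
	if (!in_hw_restart || *sta_id == INVALID_STA) {
		*sta_id = find_free_sta_id();
		if (*sta_id == INVALID_STA)
			return -1; /* -ENOSPC in the driver */
	}
	return 0;
}

int main(void)
{
	unsigned char id = INVALID_STA;

	allocate_int_sta(false, &id); /* normal start: fresh ID */
	printf("first start: %d\n", id);
	allocate_int_sta(true, &id);  /* restart: existing ID is kept */
	printf("after restart: %d\n", id);
	return 0;
}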
@@ -2478,28 +2479,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	/*
 	 * Note the possible cases:
-	 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
-	 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
-	 *    one and mark it as reserved
-	 * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
-	 *    non-DQA mode, since the TXQ hasn't yet been allocated
-	 * Don't support case 3 for new TX path as it is not expected to happen
-	 * and aggregation will be offloaded soon anyway
+	 * 1. An enabled TXQ - TXQ needs to become agg'ed
+	 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
+	 *    it as reserved
 	 */
 	txq_id = mvmsta->tid_data[tid].txq_id;
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		if (txq_id == IWL_MVM_INVALID_QUEUE) {
-			ret = -ENXIO;
-			goto release_locks;
-		}
-	} else if (unlikely(mvm->queue_info[txq_id].status ==
-			    IWL_MVM_QUEUE_SHARED)) {
-		ret = -ENXIO;
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "Can't start tid %d agg on shared queue!\n",
-				    tid);
-		goto release_locks;
-	} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+	if (txq_id == IWL_MVM_INVALID_QUEUE) {
 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						 IWL_MVM_DQA_MIN_DATA_QUEUE,
 						 IWL_MVM_DQA_MAX_DATA_QUEUE);
@@ -2508,16 +2493,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
 			goto release_locks;
 		}
-		/*
-		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
-		 * an inactive queue from iwl_mvm_find_free_queue() is
-		 * certainly a bug
-		 */
-		WARN_ON(mvm->queue_info[txq_id].status ==
-			IWL_MVM_QUEUE_INACTIVE);
 
 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+	} else if (unlikely(mvm->queue_info[txq_id].status ==
+			    IWL_MVM_QUEUE_SHARED)) {
+		ret = -ENXIO;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can't start tid %d agg on shared queue!\n",
+				    tid);
+		goto release_locks;
 	}
 
 	spin_unlock(&mvm->queue_info_lock);
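With the non-DQA and new-TX-API special cases gone, the aggregation-start path reduces to a two-way decision: no queue assigned yet means reserve a free data queue, a shared queue refuses aggregation outright, and any other state is usable as-is. A compact sketch of that decision tree, with stubbed status values and a hypothetical find_free_queue() standing in for the driver's:

#include <stdio.h>

enum queue_status { QUEUE_FREE, QUEUE_RESERVED, QUEUE_READY, QUEUE_SHARED };

#define INVALID_QUEUE -1

/* Hypothetical: returns a free queue index, or INVALID_QUEUE on failure. */
static int find_free_queue(enum queue_status *info, int min, int max)
{
	for (int q = min; q <= max; q++)
		if (info[q] == QUEUE_FREE)
			return q;
	return INVALID_QUEUE;
}

/*
 * Mirrors the patched logic in iwl_mvm_sta_tx_agg_start(): no queue yet
 * -> reserve one; shared queue -> refuse; otherwise the queue can be
 * switched to aggregation as-is.
 */
static int agg_start_pick_queue(enum queue_status *info, int txq_id)
{
	if (txq_id == INVALID_QUEUE) {
		txq_id = find_free_queue(info, 0, 7);
		if (txq_id == INVALID_QUEUE)
			return -1; /* allocation failed */
		info[txq_id] = QUEUE_RESERVED;
	} else if (info[txq_id] == QUEUE_SHARED) {
		return -2; /* -ENXIO: can't aggregate on a shared queue */
	}
	return txq_id;
}

int main(void)
{
	enum queue_status info[8] = { QUEUE_SHARED, QUEUE_FREE };

	printf("no queue yet -> %d\n", agg_start_pick_queue(info, INVALID_QUEUE));
	printf("shared queue -> %d\n", agg_start_pick_queue(info, 0));
	return 0;
}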
@@ -2696,8 +2681,10 @@ out:
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
 					struct iwl_mvm_sta *mvmsta,
-					u16 txq_id)
+					struct iwl_mvm_tid_data *tid_data)
 {
+	u16 txq_id = tid_data->txq_id;
+
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return;
 
@@ -2709,8 +2696,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
 	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
 	 * free.
 	 */
-	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+	}
 
 	spin_unlock_bh(&mvm->queue_info_lock);
 }
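Passing the whole tid_data instead of a bare txq_id lets the helper clear the TID's queue reference in the same place the reservation is released, so queue_info[] and tid_data->txq_id cannot drift apart; the two call sites in the following hunks (agg stop and flush) are updated accordingly. A small sketch of that invariant, with stubbed types and hypothetical names:

#include <assert.h>

enum queue_status { QUEUE_FREE, QUEUE_RESERVED, QUEUE_READY };

#define INVALID_QUEUE -1

struct tid_data {
	int txq_id; /* queue owned by this TID, or INVALID_QUEUE */
};

/*
 * Loosely mirrors the patched helper: when a queue was only reserved
 * (never enabled), release it and forget it in tid_data in one step,
 * so both bookkeeping structures agree afterwards.
 */
static void unreserve_agg_queue(enum queue_status *info, struct tid_data *tid)
{
	int txq_id = tid->txq_id;

	if (txq_id != INVALID_QUEUE && info[txq_id] == QUEUE_RESERVED) {
		info[txq_id] = QUEUE_FREE;
		tid->txq_id = INVALID_QUEUE;
	}
}

int main(void)
{
	enum queue_status info[8] = { [3] = QUEUE_RESERVED };
	struct tid_data tid = { .txq_id = 3 };

	unreserve_agg_queue(info, &tid);
	assert(info[3] == QUEUE_FREE && tid.txq_id == INVALID_QUEUE);
	return 0;
}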
@@ -2741,7 +2730,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
-	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
+	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
 
 	switch (tid_data->state) {
 	case IWL_AGG_ON:
@@ -2808,7 +2797,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
-	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);
+	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
 
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
@@ -3233,17 +3222,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	}
 	sta_id = mvm_sta->sta_id;
 
-	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
-		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
-					    false);
-		goto end;
-	}
-
 	/*
 	 * It is possible that the 'sta' parameter is NULL, and thus
-	 * there is a need to retrieve the sta from the local station
+	 * there is a need to retrieve the sta from the local station
 	 * table.
 	 */
 	if (!sta) {
@@ -3258,6 +3239,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 
 		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
 			return -EINVAL;
+	} else {
+		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+		sta_id = mvmvif->mcast_sta.sta_id;
+	}
+
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+		goto end;
 	}
 
 	/* If the key_offset is not pre-assigned, we need to find a
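The last two hunks reorder iwl_mvm_set_sta_key() so that sta_id is fully resolved, including the new else branch that selects the interface's multicast station, before the IGTK cipher check runs; iwl_mvm_send_sta_igtk() then always receives the station the key actually belongs to. (The condition that opens this if/else sits outside the visible hunks.) A control-flow sketch with stubbed types; the cipher constants stand in for the WLAN_CIPHER_SUITE_* values:

#include <stdbool.h>
#include <stdio.h>

enum cipher { CIPHER_CCMP, CIPHER_AES_CMAC, CIPHER_BIP_GMAC_128,
	      CIPHER_BIP_GMAC_256 };

static bool is_igtk_cipher(enum cipher c)
{
	return c == CIPHER_AES_CMAC || c == CIPHER_BIP_GMAC_128 ||
	       c == CIPHER_BIP_GMAC_256;
}

/*
 * Mirrors the patched ordering: sta_id is chosen first (per-station key
 * vs. the interface's multicast station), and only then does the IGTK
 * branch fire, so it always uses the resolved ID.
 */
static int set_sta_key(bool use_mcast_sta, int sta_key_id, int mcast_sta_id,
		       enum cipher c)
{
	int sta_id = use_mcast_sta ? mcast_sta_id : sta_key_id;

	if (is_igtk_cipher(c))
		return sta_id; /* -> send_sta_igtk(mvm, keyconf, sta_id, false) */

	/* ... regular key path continues with the same sta_id ... */
	return sta_id;
}

int main(void)
{
	/* An IGTK destined for the multicast station (hypothetical ID 7): */
	printf("igtk -> sta_id %d\n", set_sta_key(true, 2, 7, CIPHER_AES_CMAC));
	return 0;
}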