@@ -1584,41 +1584,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
 }
 
-static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
-					  struct iwl_mvm_ba_notif *ba_notif,
-					  struct iwl_mvm_tid_data *tid_data)
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+			       int txq, int index,
+			       struct ieee80211_tx_info *ba_info, u32 rate)
 {
-	info->flags |= IEEE80211_TX_STAT_AMPDU;
-	info->status.ampdu_ack_len = ba_notif->txed_2_done;
-	info->status.ampdu_len = ba_notif->txed;
-	iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
-				    info);
-	/* TODO: not accounted if the whole A-MPDU failed */
-	info->status.tx_time = tid_data->tx_time;
-	info->status.status_driver_data[0] =
-		(void *)(uintptr_t)ba_notif->reduced_txp;
-	info->status.status_driver_data[1] =
-		(void *)(uintptr_t)tid_data->rate_n_flags;
-}
-
-void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
 	struct sk_buff_head reclaimed_skbs;
 	struct iwl_mvm_tid_data *tid_data;
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff *skb;
-	int sta_id, tid, freed;
-	/* "flow" corresponds to Tx queue */
-	u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-	/* "ssn" is start of block-ack Tx window, corresponds to index
-	 * (in Tx queue's circular buffer) of first TFD/frame in window */
-	u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-
-	sta_id = ba_notif->sta_id;
-	tid = ba_notif->tid;
+	int freed;
 
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
 		      tid >= IWL_MAX_TID_COUNT,
@@ -1638,10 +1613,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	tid_data = &mvmsta->tid_data[tid];
 
-	if (tid_data->txq_id != scd_flow) {
+	if (tid_data->txq_id != txq) {
 		IWL_ERR(mvm,
-			"invalid BA notification: Q %d, tid %d, flow %d\n",
-			tid_data->txq_id, tid, scd_flow);
+			"invalid BA notification: Q %d, tid %d\n",
+			tid_data->txq_id, tid);
 		rcu_read_unlock();
 		return;
 	}
@@ -1655,27 +1630,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway).
 	 */
-	iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
-			  &reclaimed_skbs);
+	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
 
-	IWL_DEBUG_TX_REPLY(mvm,
-			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
-			   (u8 *)&ba_notif->sta_addr_lo32,
-			   ba_notif->sta_id);
-	IWL_DEBUG_TX_REPLY(mvm,
-			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
-			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
-			   (unsigned long long)le64_to_cpu(ba_notif->bitmap),
-			   scd_flow, ba_resp_scd_ssn, ba_notif->txed,
-			   ba_notif->txed_2_done);
-
-	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
-			   ba_notif->reduced_txp);
-	tid_data->next_reclaimed = ba_resp_scd_ssn;
+	tid_data->next_reclaimed = index;
 
 	iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
 	freed = 0;
+	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
 	skb_queue_walk(&reclaimed_skbs, skb) {
 		struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1697,8 +1659,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 
 		/* this is the first skb we deliver in this batch */
 		/* put the rate scaling data there */
-		if (freed == 1)
-			iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
+		if (freed == 1) {
+			info->flags |= IEEE80211_TX_STAT_AMPDU;
+			memcpy(&info->status, &ba_info->status,
+			       sizeof(ba_info->status));
+			iwl_mvm_hwrate_to_tx_status(rate, info);
+		}
 	}
 
 	spin_unlock_bh(&mvmsta->lock);
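[Annotation] The hunk above trusts the firmware's scd_ssn as the new window start: everything before that index is handed back by iwl_trans_reclaim() and counted against next_reclaimed. A toy model of that index arithmetic, runnable as plain userspace C; RING_SIZE and the helper name frames_to_reclaim are illustrative assumptions, not driver code (real queue sizes live in the transport layer):

#include <stdio.h>

#define RING_SIZE 256	/* assumed power-of-two TFD ring size */

/* frames freed: from the oldest un-reclaimed entry up to, but not
 * including, the block-ack window start, modulo the ring size */
static int frames_to_reclaim(int next_reclaimed, int index)
{
	return (index - next_reclaimed) & (RING_SIZE - 1);
}

int main(void)
{
	/* window start moved from 250 to 4: ten frames, wrapping */
	printf("%d frames freed\n", frames_to_reclaim(250, 4));
	return 0;
}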
@@ -1708,7 +1674,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	 * Still it's important to update RS about sent vs. acked.
 	 */
 	if (skb_queue_empty(&reclaimed_skbs)) {
-		struct ieee80211_tx_info ba_info = {};
 		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
 
 		if (mvmsta->vif)
@@ -1718,11 +1683,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 		if (WARN_ON_ONCE(!chanctx_conf))
 			goto out;
 
-		ba_info.band = chanctx_conf->def.chan->band;
-		iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
+		ba_info->band = chanctx_conf->def.chan->band;
+		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
 
 		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
-		iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
+		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
 	}
 
 out:
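[Annotation] Across these hunks the raw rate (rate_n_flags) now travels to the rate-scaling code inside status_driver_data[1], widened through uintptr_t so a u32 survives the void * slot. A minimal standalone sketch of that round-trip pattern; the struct and helper names here (fake_status, stash_rate, fetch_rate) are illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_status {
	void *status_driver_data[2];
};

static void stash_rate(struct fake_status *s, uint32_t rate)
{
	/* widen to pointer size first so no bits are lost */
	s->status_driver_data[1] = (void *)(uintptr_t)rate;
}

static uint32_t fetch_rate(const struct fake_status *s)
{
	/* reverse cast; truncating back to u32 is safe because only
	 * a u32 was ever stored */
	return (uint32_t)(uintptr_t)s->status_driver_data[1];
}

int main(void)
{
	struct fake_status s = { { 0 } };

	stash_rate(&s, 0x820au);	/* an arbitrary rate-style value */
	assert(fetch_rate(&s) == 0x820au);
	printf("rate round-tripped: 0x%x\n", fetch_rate(&s));
	return 0;
}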
@@ -1734,6 +1699,92 @@ out:
 	}
 }
 
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	int sta_id, tid, txq, index;
+	struct ieee80211_tx_info ba_info = {};
+	struct iwl_mvm_ba_notif *ba_notif;
+	struct iwl_mvm_tid_data *tid_data;
+	struct iwl_mvm_sta *mvmsta;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		struct iwl_mvm_compressed_ba_notif *ba_res =
+			(void *)pkt->data;
+
+		sta_id = ba_res->sta_id;
+		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+		ba_info.status.tx_time =
+			(u16)le32_to_cpu(ba_res->wireless_time);
+		ba_info.status.status_driver_data[0] =
+			(void *)(uintptr_t)ba_res->reduced_txp;
+
+		/*
+		 * TODO:
+		 * When supporting multi TID aggregations - we need to move
+		 * next_reclaimed to be per TXQ and not per TID or handle it
+		 * in a different way.
+		 * This will go together with SN and AddBA offload and cannot
+		 * be handled properly for now.
+		 */
+		WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
+		iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
+				   (int)ba_res->tfd[0].q_num,
+				   le16_to_cpu(ba_res->tfd[0].tfd_index),
+				   &ba_info, le32_to_cpu(ba_res->tx_rate));
+
+		IWL_DEBUG_TX_REPLY(mvm,
+				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+				   sta_id, le32_to_cpu(ba_res->flags),
+				   le16_to_cpu(ba_res->txed),
+				   le16_to_cpu(ba_res->done));
+		return;
+	}
+
+	ba_notif = (void *)pkt->data;
+	sta_id = ba_notif->sta_id;
+	tid = ba_notif->tid;
+	/* "flow" corresponds to Tx queue */
+	txq = le16_to_cpu(ba_notif->scd_flow);
+	/* "ssn" is start of block-ack Tx window, corresponds to index
+	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+	index = le16_to_cpu(ba_notif->scd_ssn);
+
+	rcu_read_lock();
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+	if (WARN_ON_ONCE(!mvmsta)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	tid_data = &mvmsta->tid_data[tid];
+
+	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+	ba_info.status.ampdu_len = ba_notif->txed;
+	ba_info.status.tx_time = tid_data->tx_time;
+	ba_info.status.status_driver_data[0] =
+		(void *)(uintptr_t)ba_notif->reduced_txp;
+
+	rcu_read_unlock();
+
+	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+			   tid_data->rate_n_flags);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+			   (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+			   le64_to_cpu(ba_notif->bitmap), txq, index,
+			   ba_notif->txed, ba_notif->txed_2_done);
+
+	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+			   ba_notif->reduced_txp);
+}
+
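[Annotation] The new-API branch above deliberately handles a single TFD (WARN_ON(tfd_cnt != 1)) because next_reclaimed is still tracked per TID. Purely as a hypothetical sketch of where the TODO points — not part of this patch — a multi-TFD notification could be drained by looping over the (queue, index) pairs; field names follow the usage in the hunk above, and the 1:1 pairing of tfd[] and ra_tid[] entries is an assumption made for illustration:

/* Hypothetical, assumes next_reclaimed has been made per-TXQ. */
static void iwl_mvm_tx_reclaim_all_tfds(struct iwl_mvm *mvm,
					struct iwl_mvm_compressed_ba_notif *ba_res,
					struct ieee80211_tx_info *ba_info)
{
	int i;

	/* one reclaim per (queue, index) pair in the notification */
	for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++)
		iwl_mvm_tx_reclaim(mvm, ba_res->sta_id,
				   ba_res->ra_tid[i].tid,
				   (int)ba_res->tfd[i].q_num,
				   le16_to_cpu(ba_res->tfd[i].tfd_index),
				   ba_info,
				   le32_to_cpu(ba_res->tx_rate));
}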
 /*
  * Note that there are transports that buffer frames before they reach
  * the firmware. This means that after flush_tx_path is called, the