|
@@ -1282,6 +1282,27 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
|
|
|
+ * @mvm: the mvm object
|
|
|
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
|
|
|
+ *
|
|
|
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
|
|
|
+ * it can't know that everything will go well until the end of the AMPDU, it
|
|
|
+ * can't know in advance the number of MPDUs that will be sent in the current
|
|
|
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
|
|
|
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
|
|
|
+ * of the batch. This is why the SSN of the SCD is written at the end of the
|
|
|
+ * whole struct at a variable offset. This function knows how to cope with the
|
|
|
+ * variable offset and returns the SSN of the SCD.
|
|
|
+ */
|
|
|
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
|
|
|
+ struct iwl_mvm_tx_resp *tx_resp)
|
|
|
+{
|
|
|
+ return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
|
|
|
+ tx_resp->frame_count) & 0xfff;
|
|
|
+}
|
|
|
+
|
|
|
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
|
|
struct iwl_rx_packet *pkt)
|
|
|
{
|
|
@@ -1291,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
|
|
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
|
|
|
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
|
|
|
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
|
|
|
- u32 status = le16_to_cpu(tx_resp->status.status);
|
|
|
- u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
|
|
|
+ struct agg_tx_status *agg_status =
|
|
|
+ iwl_mvm_get_agg_status(mvm, tx_resp);
|
|
|
+ u32 status = le16_to_cpu(agg_status->status);
|
|
|
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
|
|
|
struct iwl_mvm_sta *mvmsta;
|
|
|
struct sk_buff_head skbs;
|
|
|
u8 skb_freed = 0;
|
|
@@ -1301,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
|
|
|
|
|
__skb_queue_head_init(&skbs);
|
|
|
|
|
|
+ if (iwl_mvm_has_new_tx_api(mvm))
|
|
|
+ txq_id = le16_to_cpu(tx_resp->v6.tx_queue);
|
|
|
+
|
|
|
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
|
|
|
|
|
|
/* we can free until ssn % q.n_bd not inclusive */
|
|
@@ -1557,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
|
|
|
struct iwl_rx_packet *pkt)
|
|
|
{
|
|
|
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
|
|
|
- struct agg_tx_status *frame_status = &tx_resp->status;
|
|
|
+ struct agg_tx_status *frame_status =
|
|
|
+ iwl_mvm_get_agg_status(mvm, tx_resp);
|
|
|
int i;
|
|
|
|
|
|
for (i = 0; i < tx_resp->frame_count; i++) {
|
|
@@ -1772,7 +1798,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
|
|
if (tid == IWL_MGMT_TID)
|
|
|
tid = IWL_MAX_TID_COUNT;
|
|
|
iwl_mvm_tx_reclaim(mvm, sta_id, tid,
|
|
|
- (int)ba_res->tfd[0].q_num,
|
|
|
+ (int)(le16_to_cpu(ba_res->tfd[0].q_num)),
|
|
|
le16_to_cpu(ba_res->tfd[0].tfd_index),
|
|
|
&ba_info, le32_to_cpu(ba_res->tx_rate));
|
|
|
|