@@ -88,14 +88,14 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
 					  int num_tbs)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-	int write_ptr = txq->write_ptr;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	u8 filled_tfd_size, num_fetch_chunks;
 	u16 len = byte_cnt;
 	__le16 bc_ent;
 
 	len = DIV_ROUND_UP(len, 4);
 
-	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
 		return;
 
 	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -111,7 +111,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
 	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-	scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
+	scd_bc_tbl->tfd_offset[idx] = bc_ent;
 }
 
 /*
@@ -176,16 +176,12 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
-	int rd_ptr = txq->read_ptr;
-	int idx = get_cmd_index(txq, rd_ptr);
+	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 
 	lockdep_assert_held(&txq->lock);
 
-	/* We have only q->n_window txq->entries, but we use
-	 * TFD_QUEUE_SIZE_MAX tfds
-	 */
 	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
-				iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));
+				iwl_pcie_get_tfd(trans_pcie, txq, idx));
 
 	/* free SKB */
 	if (txq->entries) {
@@ -373,8 +369,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	struct iwl_tfh_tfd *tfd =
-		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+		iwl_pcie_get_tfd(trans_pcie, txq, idx);
 	dma_addr_t tb_phys;
 	bool amsdu;
 	int i, len, tb1_len, tb2_len, hdr_len;
@@ -386,10 +383,10 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 		(*ieee80211_get_qos_ctl(hdr) &
 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
 
-	tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 	/* The first TB points to bi-directional DMA data */
 	if (!amsdu)
-		memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
 		       IWL_FIRST_TB_SIZE);
 
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@@ -431,7 +428,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 	 * building the A-MSDU might have changed this data, so memcpy
 	 * it now
 	 */
-	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
 	       IWL_FIRST_TB_SIZE);
 	return tfd;
 }
@@ -484,6 +481,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	int idx;
 	void *tfd;
 
 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
@@ -497,16 +495,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
+	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+
 	/* Set up driver data for this TFD */
-	txq->entries[txq->write_ptr].skb = skb;
-	txq->entries[txq->write_ptr].cmd = dev_cmd;
+	txq->entries[idx].skb = skb;
+	txq->entries[idx].cmd = dev_cmd;
 
 	dev_cmd->hdr.sequence =
 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-				  INDEX_TO_SEQ(txq->write_ptr)));
+				  INDEX_TO_SEQ(idx)));
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_meta = &txq->entries[txq->write_ptr].meta;
+	out_meta = &txq->entries[idx].meta;
 	out_meta->flags = 0;
 
 	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
@@ -562,7 +562,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 	unsigned long flags;
 	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
-	int idx, i, cmd_pos;
+	int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	u16 copy_size, cmd_size, tb0_size;
 	bool had_nocopy = false;
 	u8 group_id = iwl_cmd_groupid(cmd->id);
@@ -651,7 +651,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 		goto free_dup_buf;
 	}
 
-	idx = get_cmd_index(txq, txq->write_ptr);
 	out_cmd = txq->entries[idx].cmd;
 	out_meta = &txq->entries[idx].meta;
 
@@ -938,7 +937,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
 			    txq_id, txq->read_ptr);
 
 		if (txq_id != trans_pcie->cmd_queue) {
-			int idx = get_cmd_index(txq, txq->read_ptr);
+			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 			struct sk_buff *skb = txq->entries[idx].skb;
 
 			if (WARN_ON_ONCE(!skb))
@@ -1070,7 +1069,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
 	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));
+	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));
 
 	ret = iwl_trans_send_cmd(trans, &hcmd);
 	if (ret)
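
Background for the change: txq->write_ptr and txq->read_ptr run over the full TFD ring, while txq->entries, txq->first_tb_bufs and the gen2 byte-count table only provide n_window slots, so every access to those arrays has to be wrapped through iwl_pcie_get_cmd_index first. A minimal sketch of what such a wrapping helper typically looks like, assuming n_window is a power of two; the signature and body here are illustrative, the real definition lives in the driver's pcie/internal.h and may differ:

static inline int iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
	/* Illustrative sketch: fold a full-ring index into the smaller
	 * n_window-sized window of entries/buffers; only valid when
	 * n_window is a power of two.
	 */
	return index & (q->n_window - 1);
}

The cb_size change in iwl_trans_pcie_dyn_txq_alloc follows the same idea: the circular-buffer size reported to the firmware becomes TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS) instead of being derived from TFD_QUEUE_SIZE_MAX, so it reflects the shorter per-queue window rather than the maximum ring size.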