@@ -71,7 +71,7 @@
  *
  ***************************************************/
 
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
 {
 	unsigned int max;
 	unsigned int used;
@@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q)
 /*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
 {
 	q->n_window = slots_num;
 	q->id = id;
@@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 
 	spin_lock(&txq->lock);
 	/* check if triggered erroneously */
-	if (txq->q.read_ptr == txq->q.write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		spin_unlock(&txq->lock);
 		return;
 	}
 	spin_unlock(&txq->lock);
 
-	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
 		jiffies_to_msecs(txq->wd_timeout));
 
 	iwl_trans_pcie_log_scd_error(trans, txq);
@@ -176,22 +176,21 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
  * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
 static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-					     struct iwl_txq *txq, u16 byte_cnt)
+					     struct iwl_txq *txq, u16 byte_cnt,
+					     int num_tbs)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int write_ptr = txq->q.write_ptr;
-	int txq_id = txq->q.id;
+	int write_ptr = txq->write_ptr;
+	int txq_id = txq->id;
 	u8 sec_ctl = 0;
-	u8 sta_id = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(void *) txq->entries[txq->q.write_ptr].cmd->payload;
+		(void *)txq->entries[txq->write_ptr].cmd->payload;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
 	switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -205,14 +204,32 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 		break;
 	}
-
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
 	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
 		return;
 
-	bc_ent = cpu_to_le16(len | (sta_id << 12));
+	if (trans->cfg->use_tfh) {
+		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+				     num_tbs * sizeof(struct iwl_tfh_tb);
+		/*
+		 * filled_tfd_size contains the number of filled bytes in the
+		 * TFD.
+		 * Dividing it by 64 will give the number of chunks to fetch
+		 * to SRAM - 0 for one chunk, 1 for 2 and so on.
+		 * If, for example, TFD contains only 3 TBs then 32 bytes
+		 * of the TFD are used, and only one chunk of 64 bytes should
+		 * be fetched
+		 */
+		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+	} else {
+		u8 sta_id = tx_cmd->sta_id;
+
+		bc_ent = cpu_to_le16(len | (sta_id << 12));
+	}
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
 
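A quick sanity check of the fetch-chunk arithmetic above, as a standalone sketch. The concrete sizes (a 2-byte num_tbs field ahead of 10-byte TB entries, 25 TBs maximum) are assumptions inferred from the "3 TBs -> 32 bytes" example in the comment, not values taken from the real headers:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* assumed geometry: offsetof(struct iwl_tfh_tfd, tbs) == 2,
	 * sizeof(struct iwl_tfh_tb) == 10 */
	static unsigned int num_fetch_chunks(unsigned int num_tbs)
	{
		unsigned int filled_tfd_size = 2 + num_tbs * 10;

		/* 0 encodes one 64-byte chunk, 1 encodes two, and so on */
		return DIV_ROUND_UP(filled_tfd_size, 64) - 1;
	}

	int main(void)
	{
		printf("%u\n", num_fetch_chunks(3));	/* 32 bytes filled -> 0 (one chunk) */
		printf("%u\n", num_fetch_chunks(25));	/* 252 bytes filled -> 3 (four chunks) */
		return 0;
	}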
@@ -227,12 +244,12 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-	int txq_id = txq->q.id;
-	int read_ptr = txq->q.read_ptr;
+	int txq_id = txq->id;
+	int read_ptr = txq->read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+		(void *)txq->entries[read_ptr].cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -240,6 +257,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 		sta_id = tx_cmd->sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
 
 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
@@ -255,7 +273,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
-	int txq_id = txq->q.id;
+	int txq_id = txq->id;
 
 	lockdep_assert_held(&txq->lock);
 
@@ -289,10 +307,10 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 	 * if not in power-save mode, uCode will never sleep when we're
 	 * trying to tx (during RFKILL, we're not trying to tx).
 	 */
-	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
 	if (!txq->block)
 		iwl_write32(trans, HBUS_TARG_WRPTR,
-			    txq->q.write_ptr | (txq_id << 8));
+			    txq->write_ptr | (txq_id << 8));
 }
 
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
@@ -312,11 +330,30 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	}
 }
 
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+				     struct iwl_txq *txq, int idx)
+{
+	return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+						  void *tfd, u8 idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	struct iwl_tfd *tfd_fh;
+	struct iwl_tfd_tb *tb;
+	dma_addr_t addr;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+		return (dma_addr_t)(le64_to_cpu(tb->addr));
+	}
+
+	tfd_fh = (void *)tfd;
+	tb = &tfd_fh->tbs[idx];
+	addr = get_unaligned_le32(&tb->lo);
 
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
 	if (sizeof(dma_addr_t) > sizeof(u32))
 		addr |=
 		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
@@ -324,37 +361,59 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 	return addr;
 }
 
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				       dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+				       u8 idx, dma_addr_t addr, u16 len)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
 
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+		put_unaligned_le64(addr, &tb->addr);
+		tb->tb_len = cpu_to_le16(len);
+
+		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
+
+		u16 hi_n_len = len << 4;
+
+		put_unaligned_le32(addr, &tb->lo);
+		if (sizeof(dma_addr_t) > sizeof(u32))
+			hi_n_len |= ((addr >> 16) >> 16) & 0xF;
 
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
+		tb->hi_n_len = cpu_to_le16(hi_n_len);
 
-	tfd->num_tbs = idx + 1;
+		tfd_fh->num_tbs = idx + 1;
+	}
 }
 
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *tfd)
 {
-	return tfd->num_tbs & 0x1f;
+	struct iwl_tfd *tfd_fh;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		return le16_to_cpu(tfd_fh->num_tbs) & 0x1f;
+	}
+
+	tfd_fh = (void *)tfd;
+	return tfd_fh->num_tbs & 0x1f;
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 			       struct iwl_cmd_meta *meta,
-			       struct iwl_tfd *tfd)
+			       struct iwl_txq *txq, int index)
 {
-	int i;
-	int num_tbs;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i, num_tbs;
+	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	if (num_tbs >= IWL_NUM_OF_TBS) {
+	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
 		return;
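The legacy branch of iwl_pcie_tfd_tb_get_addr()/iwl_pcie_tfd_set_tb() above packs a 36-bit DMA address and a 12-bit length into a 32-bit lo word plus a 16-bit hi_n_len word: bits 0-3 of hi_n_len carry address bits 32-35, bits 4-15 carry the length. A minimal host-endian sketch of the round trip (layout inferred from the shifts in the patch itself; the driver stores both fields little-endian):

	#include <assert.h>
	#include <stdint.h>

	struct tb { uint32_t lo; uint16_t hi_n_len; };

	static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
	{
		tb->lo = (uint32_t)addr;
		tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
	}

	static uint64_t tb_get_addr(const struct tb *tb)
	{
		return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
	}

	static uint16_t tb_get_len(const struct tb *tb)
	{
		return tb->hi_n_len >> 4;
	}

	int main(void)
	{
		struct tb tb;

		tb_set(&tb, 0xD12345678ULL, 100);	/* 36-bit address, len < 4096 */
		assert(tb_get_addr(&tb) == 0xD12345678ULL);
		assert(tb_get_len(&tb) == 100);
		return 0;
	}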
@@ -363,18 +422,30 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 	/* first TB is never freed - it's the bidirectional DMA data */
 
 	for (i = 1; i < num_tbs; i++) {
-		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+		if (meta->tbs & BIT(i))
 			dma_unmap_page(trans->dev,
-				       iwl_pcie_tfd_tb_get_addr(tfd, i),
-				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
 				       DMA_TO_DEVICE);
 		else
 			dma_unmap_single(trans->dev,
-					 iwl_pcie_tfd_tb_get_addr(tfd, i),
-					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
+								  i),
+					 iwl_pcie_tfd_tb_get_len(trans, tfd,
+								 i),
 					 DMA_TO_DEVICE);
 	}
-	tfd->num_tbs = 0;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	}
+
 }
 
 /*
@@ -388,20 +459,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  */
 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-	struct iwl_tfd *tfd_tmp = txq->tfds;
-
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
-	int rd_ptr = txq->q.read_ptr;
-	int idx = get_cmd_index(&txq->q, rd_ptr);
+	int rd_ptr = txq->read_ptr;
+	int idx = get_cmd_index(txq, rd_ptr);
 
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -423,23 +492,21 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 				  dma_addr_t addr, u16 len, bool reset)
 {
-	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	void *tfd;
 	u32 num_tbs;
 
-	q = &txq->q;
-	tfd_tmp = txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
+	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
 
 	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
+		memset(tfd, 0, trans_pcie->tfd_size);
 
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
+	/* Each TFD can point to a maximum max_tbs Tx buffers */
+	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
-			IWL_NUM_OF_TBS);
+			trans_pcie->max_tbs);
 		return -EINVAL;
 	}
 
@@ -447,7 +514,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 		    "Unaligned address = %llx\n", (unsigned long long)addr))
 		return -EINVAL;
 
-	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
 
 	return num_tbs;
 }
@@ -457,7 +524,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 			      u32 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
 	size_t tb0_buf_sz;
 	int i;
 
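The allocation above now scales with trans_pcie->tfd_size instead of a hard-coded sizeof(struct iwl_tfd). Where tfd_size and max_tbs get assigned is outside this diff; a plausible sketch of that transport-init selection, using only names that appear in this patch (treat it as illustrative, not the actual init code):

	/* hypothetical init-time selection, presumably done once per transport */
	if (trans->cfg->use_tfh) {
		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
	} else {
		trans_pcie->max_tbs = IWL_NUM_OF_TBS;	/* 20, per the old comment */
		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
	}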
@@ -468,7 +535,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 		    (unsigned long)txq);
 	txq->trans_pcie = trans_pcie;
 
-	txq->q.n_window = slots_num;
+	txq->n_window = slots_num;
 
 	txq->entries = kcalloc(slots_num,
 			       sizeof(struct iwl_pcie_txq_entry),
@@ -489,7 +556,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
 	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
-				       &txq->q.dma_addr, GFP_KERNEL);
+				       &txq->dma_addr, GFP_KERNEL);
 	if (!txq->tfds)
 		goto error;
 
@@ -503,11 +570,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 	if (!txq->first_tb_bufs)
 		goto err_free_tfds;
 
-	txq->q.id = txq_id;
+	txq->id = txq_id;
 
 	return 0;
 err_free_tfds:
-	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
 error:
 	if (txq->entries && txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++)
@@ -531,7 +598,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+	ret = iwl_queue_init(txq, slots_num, txq_id);
 	if (ret)
 		return ret;
 
@@ -545,10 +612,10 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 	if (trans->cfg->use_tfh)
 		iwl_write_direct64(trans,
 				   FH_MEM_CBBC_QUEUE(trans, txq_id),
-				   txq->q.dma_addr);
+				   txq->dma_addr);
 	else
 		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
-				   txq->q.dma_addr >> 8);
+				   txq->dma_addr >> 8);
 
 	return 0;
 }
@@ -595,15 +662,14 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
 
 	spin_lock_bh(&txq->lock);
-	while (q->write_ptr != q->read_ptr) {
+	while (txq->write_ptr != txq->read_ptr) {
 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
-				   txq_id, q->read_ptr);
+				   txq_id, txq->read_ptr);
 
 		if (txq_id != trans_pcie->cmd_queue) {
-			struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
 
 			if (WARN_ON_ONCE(!skb))
 				continue;
@@ -611,15 +677,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_txq_free_tfd(trans, txq);
-		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
 
-		if (q->read_ptr == q->write_ptr) {
+		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
 
 			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 			if (txq_id != trans_pcie->cmd_queue) {
 				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
-					      q->id);
+					      txq->id);
 				iwl_trans_unref(trans);
 			} else {
 				iwl_pcie_clear_cmd_in_flight(trans);
@@ -663,7 +729,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 
 	/* De-alloc array of command/tx buffers */
 	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < txq->q.n_window; i++) {
+		for (i = 0; i < txq->n_window; i++) {
 			kzfree(txq->entries[i].cmd);
 			kzfree(txq->entries[i].free_buf);
 		}
@@ -671,13 +737,13 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
-				  txq->tfds, txq->q.dma_addr);
-		txq->q.dma_addr = 0;
+				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  txq->tfds, txq->dma_addr);
+		txq->dma_addr = 0;
 		txq->tfds = NULL;
 
 		dma_free_coherent(dev,
-				  sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
 				  txq->first_tb_bufs, txq->first_tb_dma);
 	}
 
@@ -761,14 +827,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
 		if (trans->cfg->use_tfh)
 			iwl_write_direct64(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
-					   txq->q.dma_addr);
+					   txq->dma_addr);
 		else
 			iwl_write_direct32(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
-					   txq->q.dma_addr >> 8);
+					   txq->dma_addr >> 8);
 		iwl_pcie_txq_unmap(trans, txq_id);
-		txq->q.read_ptr = 0;
-		txq->q.write_ptr = 0;
+		txq->read_ptr = 0;
+		txq->write_ptr = 0;
 	}
 
 	/* Tell NIC where to find the "keep warm" buffer */
@@ -1012,7 +1078,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
 	 * if empty delete timer, otherwise move timer forward
 	 * since we're making progress on this queue
 	 */
-	if (txq->q.read_ptr == txq->q.write_ptr)
+	if (txq->read_ptr == txq->write_ptr)
 		del_timer(&txq->stuck_timer);
 	else
 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
@@ -1025,7 +1091,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
-	struct iwl_queue *q = &txq->q;
 	int last_to_free;
 
 	/* This function is not meant to release cmd queue*/
@@ -1040,21 +1105,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 	}
 
-	if (txq->q.read_ptr == tfd_num)
+	if (txq->read_ptr == tfd_num)
 		goto out;
 
 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-			   txq_id, txq->q.read_ptr, tfd_num, ssn);
+			   txq_id, txq->read_ptr, tfd_num, ssn);
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
 	last_to_free = iwl_queue_dec_wrap(tfd_num);
 
-	if (!iwl_queue_used(q, last_to_free)) {
+	if (!iwl_queue_used(txq, last_to_free)) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
-			q->write_ptr, q->read_ptr);
+			txq->write_ptr, txq->read_ptr);
 		goto out;
 	}
 
@@ -1062,9 +1127,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 
 	for (;
-	     q->read_ptr != tfd_num;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
-		struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+	     txq->read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
 
 		if (WARN_ON_ONCE(!skb))
 			continue;
@@ -1073,16 +1138,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		__skb_queue_tail(skbs, skb);
 
-		txq->entries[txq->q.read_ptr].skb = NULL;
+		txq->entries[txq->read_ptr].skb = NULL;
 
-		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+		if (!trans->cfg->use_tfh)
+			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
 		iwl_pcie_txq_free_tfd(trans, txq);
 	}
 
 	iwl_pcie_txq_progress(txq);
 
-	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+	if (iwl_queue_space(txq) > txq->low_mark &&
 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
 		struct sk_buff_head overflow_skbs;
 
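The reclaim loops above and below walk read_ptr forward with iwl_queue_inc_wrap(). Its body is not part of this diff, but the BUILD_BUG_ON in iwl_pcie_txq_init (TFD_QUEUE_SIZE_MAX must be a power of two) suggests the wrap is a mask rather than a modulo; a standalone sketch under that assumption:

	#include <stdio.h>

	#define TFD_QUEUE_SIZE_MAX 256	/* stand-in; must be a power of two */

	/* assumed shape of iwl_queue_inc_wrap(): advance and mask */
	static int queue_inc_wrap(int index)
	{
		return ++index & (TFD_QUEUE_SIZE_MAX - 1);
	}

	int main(void)
	{
		printf("%d\n", queue_inc_wrap(7));			/* 8 */
		printf("%d\n", queue_inc_wrap(TFD_QUEUE_SIZE_MAX - 1));	/* wraps to 0 */
		return 0;
	}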
@@ -1114,12 +1180,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		}
 		spin_lock_bh(&txq->lock);
 
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+		if (iwl_queue_space(txq) > txq->low_mark)
 			iwl_wake_queue(trans, txq);
 	}
 
-	if (q->read_ptr == q->write_ptr) {
-		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+	if (txq->read_ptr == txq->write_ptr) {
+		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
 		iwl_trans_unref(trans);
 	}
 
@@ -1181,31 +1247,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
 	unsigned long flags;
 	int nfreed = 0;
 
 	lockdep_assert_held(&txq->lock);
 
-	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
-			q->write_ptr, q->read_ptr);
+			txq->write_ptr, txq->read_ptr);
 		return;
 	}
 
-	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+	for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
 
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-				idx, q->write_ptr, q->read_ptr);
+				idx, txq->write_ptr, txq->read_ptr);
 			iwl_force_nmi(trans);
 		}
 	}
 
-	if (q->read_ptr == q->write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 		iwl_pcie_clear_cmd_in_flight(trans);
 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1291,14 +1356,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			 */
 			iwl_scd_txq_disable_agg(trans, txq_id);
 
-			ssn = txq->q.read_ptr;
+			ssn = txq->read_ptr;
 		}
 	}
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
-	txq->q.read_ptr = (ssn & 0xff);
-	txq->q.write_ptr = (ssn & 0xff);
+	txq->read_ptr = (ssn & 0xff);
+	txq->write_ptr = (ssn & 0xff);
 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
 			   (ssn & 0xff) | (txq_id << 8));
 
@@ -1351,6 +1416,14 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 	txq->ampdu = !shared_mode;
 }
 
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	return trans_pcie->scd_bc_tbls.dma +
+	       txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
+
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 				bool configure_scd)
 {
@@ -1406,7 +1479,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	unsigned long flags;
@@ -1505,7 +1577,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	spin_lock_bh(&txq->lock);
 
-	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -1514,7 +1586,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		goto free_dup_buf;
 	}
 
-	idx = get_cmd_index(q, q->write_ptr);
+	idx = get_cmd_index(txq, txq->write_ptr);
 	out_cmd = txq->entries[idx].cmd;
 	out_meta = &txq->entries[idx].meta;
 
@@ -1533,7 +1605,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		out_cmd->hdr_wide.reserved = 0;
 		out_cmd->hdr_wide.sequence =
 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-				    INDEX_TO_SEQ(q->write_ptr));
+				    INDEX_TO_SEQ(txq->write_ptr));
 
 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
 		copy_size = sizeof(struct iwl_cmd_header_wide);
@@ -1541,7 +1613,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
 		out_cmd->hdr.sequence =
 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-				    INDEX_TO_SEQ(q->write_ptr));
+				    INDEX_TO_SEQ(txq->write_ptr));
 		out_cmd->hdr.group_id = 0;
 
 		cmd_pos = sizeof(struct iwl_cmd_header);
@@ -1591,7 +1663,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		     iwl_get_cmd_string(trans, cmd->id),
 		     group_id, out_cmd->hdr.cmd,
 		     le16_to_cpu(out_cmd->hdr.sequence),
-		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
 
 	/* start the TFD with the minimum copy bytes */
 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1607,8 +1679,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 					   copy_size - tb0_size,
 					   DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1631,8 +1703,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1640,8 +1712,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
-	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
-		     sizeof(out_meta->flags) * BITS_PER_BYTE);
+	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
@@ -1650,7 +1721,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
 	/* start timer if queue currently empty */
-	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 
 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
@@ -1662,7 +1733,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	/* Increment and update queue's write index */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1700,20 +1771,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	if (WARN(txq_id != trans_pcie->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
 		 txq_id, trans_pcie->cmd_queue, sequence,
-		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
-		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+		 trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+		 trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
 		iwl_print_hex_error(trans, pkt, 32);
 		return;
 	}
 
 	spin_lock_bh(&txq->lock);
 
-	cmd_index = get_cmd_index(&txq->q, index);
+	cmd_index = get_cmd_index(txq, index);
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+	iwl_pcie_tfd_unmap(trans, meta, txq, index);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1826,14 +1897,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 				 HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
 		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-		struct iwl_queue *q = &txq->q;
 
 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 			iwl_get_cmd_string(trans, cmd->id),
 			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
-			q->read_ptr, q->write_ptr);
+			txq->read_ptr, txq->write_ptr);
 
 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
@@ -1911,7 +1981,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			     struct iwl_cmd_meta *out_meta,
 			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
-	struct iwl_queue *q = &txq->q;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u16 tb2_len;
 	int i;
 
@@ -1926,8 +1996,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 						     skb->data + hdr_len,
 						     tb2_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			return -EINVAL;
 		}
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1946,19 +2016,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			return -EINVAL;
 		}
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 						skb_frag_size(frag), false);
 
-		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+		out_meta->tbs |= BIT(tb_idx);
 	}
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
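The out_meta->tbs bitmap written above is what iwl_pcie_tfd_unmap() reads back at teardown: a set bit marks a TB that was mapped with dma_map_page() (an skb fragment) and must be released with dma_unmap_page(); clear bits take the dma_unmap_single() path. A minimal sketch of that bookkeeping (the uint32_t width is an assumption; the BUILD_BUG_ON earlier only guarantees the field holds IWL_TFH_NUM_TBS bits):

	#include <stdint.h>
	#include <stdio.h>

	struct cmd_meta { uint32_t tbs; };

	static void mark_page_tb(struct cmd_meta *meta, int tb_idx)
	{
		meta->tbs |= UINT32_C(1) << tb_idx;
	}

	static void unmap_all(const struct cmd_meta *meta, int num_tbs)
	{
		/* TB 0 is the bidirectional scratch data and is never freed */
		for (int i = 1; i < num_tbs; i++) {
			if (meta->tbs & (UINT32_C(1) << i))
				printf("TB %d: dma_unmap_page\n", i);
			else
				printf("TB %d: dma_unmap_single\n", i);
		}
	}

	int main(void)
	{
		struct cmd_meta meta = { 0 };

		mark_page_tb(&meta, 3);	/* one paged fragment landed in TB 3 */
		unmap_all(&meta, 5);
		return 0;
	}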
@@ -2019,7 +2089,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-	struct iwl_queue *q = &txq->q;
 	u16 length, iv_len, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
@@ -2033,8 +2102,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		IEEE80211_CCMP_HDR_LEN : 0;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     NULL, 0);
 
@@ -2190,7 +2259,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	return 0;
 
 out_unmap:
-	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
 	return ret;
 }
 #else /* CONFIG_INET */
@@ -2214,9 +2283,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
-	struct iwl_queue *q;
 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
 	void *tb1_addr;
+	void *tfd;
 	u16 len, tb1_len;
 	bool wait_write_ptr;
 	__le16 fc;
@@ -2225,7 +2294,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	bool amsdu;
 
 	txq = &trans_pcie->txq[txq_id];
-	q = &txq->q;
 
 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
 		      "TX on unused queue %d\n", txq_id))
@@ -2247,7 +2315,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	if (skb_is_nonlinear(skb) &&
-	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
 	    __skb_linearize(skb))
 		return -ENOMEM;
 
@@ -2260,11 +2328,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(q) < q->high_mark) {
+	if (iwl_queue_space(txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(q) < 3)) {
+		if (unlikely(iwl_queue_space(txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2285,19 +2353,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 */
 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 	WARN_ONCE(txq->ampdu &&
-		  (wifi_seq & 0xff) != q->write_ptr,
+		  (wifi_seq & 0xff) != txq->write_ptr,
 		  "Q: %d WiFi Seq %d tfdNum %d",
-		  txq_id, wifi_seq, q->write_ptr);
+		  txq_id, wifi_seq, txq->write_ptr);
 
 	/* Set up driver data for this TFD */
-	txq->entries[q->write_ptr].skb = skb;
-	txq->entries[q->write_ptr].cmd = dev_cmd;
+	txq->entries[txq->write_ptr].skb = skb;
+	txq->entries[txq->write_ptr].cmd = dev_cmd;
 
 	dev_cmd->hdr.sequence =
 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-			    INDEX_TO_SEQ(q->write_ptr)));
+			    INDEX_TO_SEQ(txq->write_ptr)));
 
-	tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
 		       offsetof(struct iwl_tx_cmd, scratch);
 
@@ -2305,7 +2373,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta = &txq->entries[txq->write_ptr].meta;
 	out_meta->flags = 0;
 
 	/*
@@ -2330,7 +2398,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* The first TB points to bi-directional DMA data */
-	memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
 	       IWL_FIRST_TB_SIZE);
 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
 			       IWL_FIRST_TB_SIZE, true);
@@ -2355,13 +2423,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			goto out_err;
 	}
 
+	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
 	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
-	if (q->read_ptr == q->write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		if (txq->wd_timeout) {
 			/*
 			 * If the TXQ is active, then set the timer, if not,
@@ -2375,12 +2445,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			else
 				txq->frozen_expiry_remainder = txq->wd_timeout;
 		}
-		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
 		iwl_trans_ref(trans);
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
 	if (!wait_write_ptr)
 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
 