@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 				    struct iwl_rxq *rxq)
 {
-	unsigned long flags;
 	u32 reg;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	if (rxq->need_update == 0)
 		goto exit_unlock;
@@ -190,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	rxq->need_update = 0;
 
  exit_unlock:
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -209,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
 
 	/*
 	 * If the device isn't enabled - not need to try to add buffers...
@@ -222,7 +220,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
 		return;
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
@@ -239,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -248,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
@@ -270,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			return;
 		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
@@ -308,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 			return;
 		}
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
@@ -329,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
-			spin_lock_irqsave(&rxq->lock, flags);
+			spin_lock(&rxq->lock);
 			list_add(&rxb->list, &rxq->rx_used);
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
@@ -340,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 		/* and also 256 byte aligned! */
 		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 	}
 }
 
@@ -379,13 +376,12 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_pcie_rxq_restock(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
 static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
@@ -511,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	int i, err;
-	unsigned long flags;
 
 	if (!rxq->bd) {
 		err = iwl_pcie_rx_alloc(trans);
@@ -519,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 			return err;
 	}
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 
 	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
 
@@ -535,16 +530,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	rxq->read = rxq->write = 0;
 	rxq->write_actual = 0;
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	iwl_pcie_rx_replenish(trans);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	rxq->need_update = 1;
 	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	return 0;
 }
@@ -553,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	unsigned long flags;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -564,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 
 	cancel_work_sync(&trans_pcie->rx_replenish);
 
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 
 	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
 			  rxq->bd, rxq->bd_dma);
@@ -589,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	unsigned long flags;
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
 	u32 offset = 0;
@@ -691,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
 	 * rx_free list for reuse later. */
-	spin_lock_irqsave(&rxq->lock, flags);
+	spin_lock(&rxq->lock);
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
@@ -712,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -807,6 +800,87 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 	wake_up(&trans_pcie->wait_command_queue);
 }
 
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+
+	lockdep_assert_held(&trans_pcie->irq_lock);
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(trans, CSR_INT);
+
+	/* the thread will service interrupts and re-enable them */
+	return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
+
+/* interrupt handler using ict table, with this interrupt driver will
+ * stop using INTA register to get device's interrupt, reading this register
+ * is expensive, device will write interrupts in ICT dram table, increment
+ * index then will fire interrupt to driver, driver will OR all ICT table
+ * entries from current index up to table entry with 0 value. the result is
+ * the interrupt we need to service, driver will set the entries back to 0 and
+ * set index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+	u32 val = 0;
+	u32 read;
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+	if (!read)
+		return 0;
+
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
+		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+			      trans_pcie->ict_index, read);
+		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+		trans_pcie->ict_index =
+			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+					   read);
+	} while (read);
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	return inta;
+}
+
 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 {
 	struct iwl_trans *trans = dev_id;
@@ -814,12 +888,61 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta = 0;
 	u32 handled = 0;
-	unsigned long flags;
 	u32 i;
 
 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
+
+	/* dram interrupt table not set yet,
+	 * use legacy interrupt.
+	 */
+	if (likely(trans_pcie->use_ict))
+		inta = iwl_pcie_int_cause_ict(trans);
+	else
+		inta = iwl_pcie_int_cause_non_ict(trans);
+
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+			      inta, trans_pcie->inta_mask,
+			      iwl_read32(trans, CSR_INT_MASK),
+			      iwl_read32(trans, CSR_FH_INT_STATUS));
+		if (inta & (~trans_pcie->inta_mask))
+			IWL_DEBUG_ISR(trans,
+				      "We got a masked interrupt (0x%08x)\n",
+				      inta & (~trans_pcie->inta_mask));
+	}
+
+	inta &= trans_pcie->inta_mask;
+
+	/*
+	 * Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC.
+	 */
+	if (unlikely(!inta)) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		/*
+		 * Re-enable interrupts here since we don't
+		 * have anything to service
+		 */
+		if (test_bit(STATUS_INT_ENABLED, &trans->status))
+			iwl_enable_interrupts(trans);
+		spin_unlock(&trans_pcie->irq_lock);
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/*
+		 * Hardware disappeared. It might have
+		 * already raised an interrupt.
+		 */
+		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		spin_unlock(&trans_pcie->irq_lock);
+		goto out;
+	}
 
 	/* Ack/clear/reset pending uCode interrupts.
 	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -832,19 +955,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	 * hardware bugs here by ACKing all the possible interrupts so that
 	 * interrupt coalescing can still be achieved.
 	 */
-	iwl_write32(trans, CSR_INT,
-		    trans_pcie->inta | ~trans_pcie->inta_mask);
-
-	inta = trans_pcie->inta;
+	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
 
 	if (iwl_have_debug_level(IWL_DL_ISR))
 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
 			      inta, iwl_read32(trans, CSR_INT_MASK));
 
-	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
-	trans_pcie->inta = 0;
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* Now service all interrupt bits discovered above. */
 	if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1019,11 +1136,6 @@ out:
  *
  ******************************************************************************/
 
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT	12
-#define ICT_SIZE	(1 << ICT_SHIFT)
-#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
-
 /* Free dram table */
 void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
@@ -1048,7 +1160,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->ict_tbl =
-		dma_alloc_coherent(trans->dev, ICT_SIZE,
+		dma_zalloc_coherent(trans->dev, ICT_SIZE,
 				   &trans_pcie->ict_tbl_dma,
 				   GFP_KERNEL);
 	if (!trans_pcie->ict_tbl)
@@ -1060,17 +1172,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 		return -EINVAL;
 	}
 
-	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
-		      (unsigned long long)trans_pcie->ict_tbl_dma);
-
-	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
+	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+		      (unsigned long long)trans_pcie->ict_tbl_dma,
+		      trans_pcie->ict_tbl);
 
-	/* reset table and index to all 0 */
-	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
-	trans_pcie->ict_index = 0;
-
-	/* add periodic RX interrupt */
-	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
 	return 0;
 }
 
@@ -1081,12 +1186,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
-	unsigned long flags;
 
 	if (!trans_pcie->ict_tbl)
 		return;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
 
 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1103,120 +1207,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 	trans_pcie->ict_index = 0;
 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
 	iwl_enable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
 /* Device is going down disable ict interrupt usage */
 void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	trans_pcie->use_ict = false;
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-}
-
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
-{
-	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 inta, inta_mask;
-
-	lockdep_assert_held(&trans_pcie->irq_lock);
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
-	/* Disable (but don't clear!) interrupts here to avoid
-	 * back-to-back ISRs and sporadic interrupts from our NIC.
-	 * If we have something to service, the irq thread will re-enable ints.
-	 * If we *don't* have something, we'll re-enable before leaving here. */
-	inta_mask = iwl_read32(trans, CSR_INT_MASK);
-	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
-	/* Discover which interrupts are active/pending */
-	inta = iwl_read32(trans, CSR_INT);
-
-	if (inta & (~inta_mask)) {
-		IWL_DEBUG_ISR(trans,
-			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
-			      inta & (~inta_mask));
-		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
-		inta &= inta_mask;
-	}
-
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	if (!inta) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		/*
-		 * Re-enable interrupts here since we don't have anything to
-		 * service, but only in case the handler won't run. Note that
-		 * the handler can be scheduled because of a previous
-		 * interrupt.
-		 */
-		if (test_bit(STATUS_INT_ENABLED, &trans->status) &&
-		    !trans_pcie->inta)
-			iwl_enable_interrupts(trans);
-		return IRQ_NONE;
-	}
-
-	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-		/* Hardware disappeared. It might have already raised
-		 * an interrupt */
-		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-		return IRQ_HANDLED;
-	}
-
-	if (iwl_have_debug_level(IWL_DL_ISR))
-		IWL_DEBUG_ISR(trans,
-			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-			      inta, inta_mask,
-			      iwl_read32(trans, CSR_FH_INT_STATUS));
-
-	trans_pcie->inta |= inta;
-	/* the thread will service interrupts and re-enable them */
-	return IRQ_WAKE_THREAD;
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie;
-	u32 inta;
-	u32 val = 0;
-	u32 read;
-	unsigned long flags;
-	irqreturn_t ret = IRQ_NONE;
 
 	if (!trans)
 		return IRQ_NONE;
 
-	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
-	/* dram interrupt table not set yet,
-	 * use legacy interrupt.
-	 */
-	if (unlikely(!trans_pcie->use_ict)) {
-		ret = iwl_pcie_isr(irq, data);
-		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-		return ret;
-	}
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
 	/* Disable (but don't clear!) interrupts here to avoid
 	 * back-to-back ISRs and sporadic interrupts from our NIC.
 	 * If we have something to service, the tasklet will re-enable ints.
@@ -1224,73 +1234,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 	 */
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
-	if (!read) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		goto none;
-	}
-
-	/*
-	 * Collect all entries up to the first 0, starting from ict_index;
-	 * note we already read at ict_index.
-	 */
-	do {
-		val |= read;
-		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
-			      trans_pcie->ict_index, read);
-		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
-		trans_pcie->ict_index =
-			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
-		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
-					   read);
-	} while (read);
-
-	/* We should not get this value, just ignore it. */
-	if (val == 0xffffffff)
-		val = 0;
-
-	/*
-	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
-	 * (bit 15 before shifting it to 31) to clear when using interrupt
-	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
-	 * so we use them to decide on the real state of the Rx bit.
-	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
-	 */
-	if (val & 0xC0000)
-		val |= 0x8000;
-
-	inta = (0xff & val) | ((0xff00 & val) << 16);
-	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
-		      inta, trans_pcie->inta_mask, val);
-	if (iwl_have_debug_level(IWL_DL_ISR))
-		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
-			      iwl_read32(trans, CSR_INT_MASK));
-
-	inta &= trans_pcie->inta_mask;
-	trans_pcie->inta |= inta;
-
-	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
-	if (likely(inta)) {
-		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-		return IRQ_WAKE_THREAD;
-	}
-
-	ret = IRQ_HANDLED;
-
- none:
-	/* re-enable interrupts here since we don't have anything to service.
-	 * only Re-enable if disabled by irq.
-	 */
-	if (test_bit(STATUS_INT_ENABLED, &trans->status) &&
-	    !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return ret;
+	return IRQ_WAKE_THREAD;
 }
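
Note on the locking conversion (not part of the patch): after this change the primary ISR, iwl_pcie_isr(), only masks the device's interrupts and returns IRQ_WAKE_THREAD, while the interrupt cause is read and serviced in the threaded handler, iwl_pcie_irq_handler(), under trans_pcie->irq_lock and rxq->lock. Because those locks are no longer taken from hard-IRQ context, the plain spin_lock()/spin_unlock() calls are enough and the flags locals can be dropped. The sketch below only illustrates that hard-IRQ/threaded-IRQ split; my_dev, my_isr, my_irq_thread and my_setup_irq are hypothetical names, not iwlwifi code.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_dev {
	spinlock_t lock;	/* taken only from the IRQ thread and process context */
	u32 pending;
};

/* Primary handler: runs in hard-IRQ context and does the bare minimum,
 * e.g. masking the device's interrupts, then defers the real work. */
static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: runs with local interrupts enabled, so a plain
 * spin_lock() is sufficient as long as the lock is never acquired
 * from hard-IRQ context (which is what this patch arranges). */
static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_dev *dev = data;

	spin_lock(&dev->lock);
	dev->pending = 0;	/* service and clear the collected causes */
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}

static int my_setup_irq(struct my_dev *dev, int irq)
{
	spin_lock_init(&dev->lock);
	return request_threaded_irq(irq, my_isr, my_irq_thread,
				    IRQF_SHARED, "my_dev", dev);
}

If any path still took the lock from a hard-IRQ handler, this reasoning would not hold and the irqsave/irqrestore variants would remain necessary.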