@@ -1001,12 +1001,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
 	struct list_head *element;
 	struct il_rx_buf *rxb;
 	struct page *page;
+	dma_addr_t page_dma;
 	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
-
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
 			return;
@@ -1035,26 +1035,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
 			break;
 		}
 
+		/* Get physical address of RB/SKB */
+		page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+			__free_pages(page, il->hw_params.rx_page_order);
+			break;
+		}
+
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
+			pci_unmap_page(il->pci_dev, page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
 			__free_pages(page, il->hw_params.rx_page_order);
 			return;
 		}
+
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct il_rx_buf, list);
 		list_del(element);
-		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		rxb->page = page;
-		/* Get physical address of RB/SKB */
-		rxb->page_dma =
-		    pci_map_page(il->pci_dev, page, 0,
-				 PAGE_SIZE << il->hw_params.rx_page_order,
-				 PCI_DMA_FROMDEVICE);
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
+		rxb->page_dma = page_dma;
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 		il->alloc_rxb_page++;
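
The hunk above moves the DMA mapping ahead of the rx_used dequeue: the page is
mapped first, the mapping is verified with pci_dma_mapping_error(), and only
then is a buffer taken from the queue. A failed mapping therefore costs nothing
more than freeing the page, and the queue lock is taken once per buffer instead
of twice. A minimal sketch of that pattern, using a hypothetical helper name
and the driver's existing il->pci_dev and il->hw_params fields, not code from
the patch itself:

/*
 * Sketch only (hypothetical helper): map an rx page and report failure so
 * the caller can stop refilling instead of handing the hardware an invalid
 * DMA address.
 */
static int il3945_rx_map_page_sketch(struct il_priv *il, struct page *page,
				     dma_addr_t *dma)
{
	*dma = pci_map_page(il->pci_dev, page, 0,
			    PAGE_SIZE << il->hw_params.rx_page_order,
			    PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(il->pci_dev, *dma))) {
		/* Mapping failed: release the page; caller breaks its loop. */
		__free_pages(page, il->hw_params.rx_page_order);
		return -ENOMEM;
	}
	return 0;
}

On the one failure path left inside the lock (rx_used went empty while the
mapping was in flight), the added pci_unmap_page() undoes the mapping before
the page itself is freed, so no mapping is leaked.
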
@@ -1284,8 +1292,15 @@ il3945_rx_handle(struct il_priv *il)
 			    pci_map_page(il->pci_dev, rxb->page, 0,
 					 PAGE_SIZE << il->hw_params.
 					 rx_page_order, PCI_DMA_FROMDEVICE);
-			list_add_tail(&rxb->list, &rxq->rx_free);
-			rxq->free_count++;
+			if (unlikely(pci_dma_mapping_error(il->pci_dev,
+							   rxb->page_dma))) {
+				__il_free_pages(il, rxb->page);
+				rxb->page = NULL;
+				list_add_tail(&rxb->list, &rxq->rx_used);
+			} else {
+				list_add_tail(&rxb->list, &rxq->rx_free);
+				rxq->free_count++;
+			}
 		} else
 			list_add_tail(&rxb->list, &rxq->rx_used);
 
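
In il3945_rx_handle() the same check decides where a reclaimed buffer goes:
only a page that re-mapped successfully is placed on rx_free; otherwise the
page is released and the buffer returns to rx_used so a later
il3945_rx_allocate() pass can retry it with a fresh page. A condensed sketch of
that branch (hypothetical helper name; the il_rx_queue type and
__il_free_pages() are assumed from the existing driver):

/* Sketch only: requeue a reclaimed rx buffer after re-mapping its page. */
static void il3945_rx_requeue_sketch(struct il_priv *il,
				     struct il_rx_queue *rxq,
				     struct il_rx_buf *rxb)
{
	if (unlikely(pci_dma_mapping_error(il->pci_dev, rxb->page_dma))) {
		/* Bad mapping: drop the page and let the allocator retry. */
		__il_free_pages(il, rxb->page);
		rxb->page = NULL;
		list_add_tail(&rxb->list, &rxq->rx_used);
	} else {
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
}

Clearing rxb->page marks the buffer as page-less, matching how the existing
else branch above handles buffers without a page, so nothing downstream unmaps
or hands the freed page to the hardware.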