@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
 	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
 {
-	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < htt->rx_ring.size; i++) {
+		skb = htt->rx_ring.netbufs_ring[i];
+		if (!skb)
+			continue;
+
+		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len + skb_tailroom(skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		htt->rx_ring.netbufs_ring[i] = NULL;
+	}
+}
 
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 	tasklet_kill(&htt->rx_replenish_task);
 	tasklet_kill(&htt->txrx_compl_task);
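The new helper walks every slot of the rx ring and skips NULL entries, so teardown no longer depends on the software read index and the hardware alloc index agreeing at that point. A rough user-space sketch of the same walk-and-release pattern; the demo_ring type, sizes and free() stand-ins are hypothetical and do not mirror the driver's structures or its DMA API:

/*
 * Illustrative analogue only: free() stands in for the driver's
 * dma_unmap_single() + dev_kfree_skb_any() pair.
 */
#include <stdlib.h>

#define DEMO_RING_SIZE 8			/* hypothetical ring size */

struct demo_ring {
	void *bufs[DEMO_RING_SIZE];		/* NULL means the slot is empty */
};

static void demo_ring_clean_up(struct demo_ring *ring)
{
	int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		if (!ring->bufs[i])
			continue;		/* never filled or already popped */

		free(ring->bufs[i]);
		ring->bufs[i] = NULL;		/* mark the slot empty again */
	}
}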
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
 	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
 
-	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
-		struct sk_buff *skb =
-				htt->rx_ring.netbufs_ring[sw_rd_idx];
-		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-
-		dma_unmap_single(htt->ar->dev, cb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
-		sw_rd_idx++;
-		sw_rd_idx &= htt->rx_ring.size_mask;
-	}
+	ath10k_htt_rx_ring_clean_up(htt);
 
 	dma_free_coherent(htt->ar->dev,
 			  (htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
+	htt->rx_ring.netbufs_ring[idx] = NULL;
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -494,7 +500,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
 
 	htt->rx_ring.netbufs_ring =
-		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
 			GFP_KERNEL);
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;
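The last two hunks set up the invariant the clean-up relies on: kzalloc() starts netbufs_ring with every slot NULL, and ath10k_htt_rx_netbuf_pop() clears a slot as soon as its buffer leaves the ring, so the clean-up loop only ever touches buffers the ring still owns. A minimal user-space sketch of that NULL-means-empty convention, again with hypothetical names rather than the driver's API:

#include <stdlib.h>

#define DEMO_RING_SIZE 8			/* power of two, so a mask works like size_mask */

struct demo_ring {
	void *bufs[DEMO_RING_SIZE];
	unsigned int sw_rd_idx;
};

static struct demo_ring *demo_ring_alloc(void)
{
	/* calloc() mirrors the kzalloc() change: every slot starts out NULL */
	return calloc(1, sizeof(struct demo_ring));
}

static void *demo_ring_pop(struct demo_ring *ring)
{
	void *buf = ring->bufs[ring->sw_rd_idx];

	/* clear the slot on pop so a later clean-up pass cannot free it again */
	ring->bufs[ring->sw_rd_idx] = NULL;
	ring->sw_rd_idx = (ring->sw_rd_idx + 1) & (DEMO_RING_SIZE - 1);
	return buf;
}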