@@ -33,7 +33,7 @@
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 
 static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
 {
 	struct ath10k_skb_rxcb *rxcb;
 
@@ -81,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
 	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
 }
 
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 {
 	struct htt_rx_desc *rx_desc;
@@ -126,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 		rxcb = ATH10K_SKB_RXCB(skb);
 		rxcb->paddr = paddr;
 		htt->rx_ring.netbufs_ring[idx] = skb;
-		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
 		htt->rx_ring.fill_cnt++;
 
 		if (htt->rx_ring.in_ord_rx) {
 			hash_add(htt->rx_ring.skb_table,
 				 &ATH10K_SKB_RXCB(skb)->hlist,
-				 (u32)paddr);
+				 paddr);
 		}
 
 		num--;
@@ -231,9 +285,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 	ath10k_htt_rx_ring_free(htt);
 
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  htt->rx_ops->htt_get_vaddr_ring(htt),
 			  htt->rx_ring.base_paddr);
 
 	dma_free_coherent(htt->ar->dev,
@@ -260,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
 	htt->rx_ring.netbufs_ring[idx] = NULL;
-	htt->rx_ring.paddrs_ring[idx] = 0;
+	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -380,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 }
 
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
-					       u32 paddr)
+					       u64 paddr)
 {
 	struct ath10k *ar = htt->ar;
 	struct ath10k_skb_rxcb *rxcb;
@@ -508,7 +561,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	dma_addr_t paddr;
-	void *vaddr;
+	void *vaddr, *vaddr_ring;
 	size_t size;
 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
 
@@ -532,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;
 
-	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+	size = htt->rx_ops->htt_get_rx_ring_size(htt);
 
-	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
-	if (!vaddr)
+	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+	if (!vaddr_ring)
 		goto err_dma_ring;
 
-	htt->rx_ring.paddrs_ring = vaddr;
+	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
 	htt->rx_ring.base_paddr = paddr;
 
 	vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -572,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 err_dma_idx:
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  vaddr_ring,
 			  htt->rx_ring.base_paddr);
 err_dma_ring:
 	kfree(htt->rx_ring.netbufs_ring);
@@ -2847,3 +2899,29 @@ exit:
 	return done;
 }
 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	if (ar->hw_params.target_64bit)
+		htt->rx_ops = &htt_rx_ops_64;
+	else
+		htt->rx_ops = &htt_rx_ops_32;
+}
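
The struct behind htt->rx_ops is not visible in this hunk; the real ath10k_htt_rx_ops definition presumably lands in the companion htt.h change, with its five members inferable from the call sites above. As a minimal standalone sketch of the dispatch pattern (not the kernel code): names are simplified, plain heap memory stands in for dma_alloc_coherent(), the ring is sized by element width for clarity, and only the 32-bit variant is spelled out.

/* sketch only: userspace stand-ins for the kernel types used above */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct htt;

struct htt_rx_ops {			/* members inferred from call sites */
	size_t (*get_rx_ring_size)(struct htt *htt);
	void (*config_paddrs_ring)(struct htt *htt, void *vaddr);
	void (*set_paddrs_ring)(struct htt *htt, uint64_t paddr, int idx);
	void *(*get_vaddr_ring)(struct htt *htt);
	void (*reset_paddrs_ring)(struct htt *htt, int idx);
};

struct htt {
	int ring_size;			/* entries in the paddr ring */
	union {				/* one buffer, two element widths */
		uint32_t *paddrs_ring_32;
		uint64_t *paddrs_ring_64;
	};
	const struct htt_rx_ops *rx_ops;	/* picked once at init */
};

static size_t get_rx_ring_size_32(struct htt *htt)
{
	return htt->ring_size * sizeof(*htt->paddrs_ring_32);
}

static void config_paddrs_ring_32(struct htt *htt, void *vaddr)
{
	htt->paddrs_ring_32 = vaddr;
}

static void set_paddrs_ring_32(struct htt *htt, uint64_t paddr, int idx)
{
	htt->paddrs_ring_32[idx] = (uint32_t)paddr;	/* 32-bit target */
}

static void *get_vaddr_ring_32(struct htt *htt)
{
	return htt->paddrs_ring_32;
}

static void reset_paddrs_ring_32(struct htt *htt, int idx)
{
	htt->paddrs_ring_32[idx] = 0;
}

static const struct htt_rx_ops rx_ops_32 = {
	.get_rx_ring_size = get_rx_ring_size_32,
	.config_paddrs_ring = config_paddrs_ring_32,
	.set_paddrs_ring = set_paddrs_ring_32,
	.get_vaddr_ring = get_vaddr_ring_32,
	.reset_paddrs_ring = reset_paddrs_ring_32,
};

/* the _64 variants would mirror these with uint64_t ring slots */

int main(void)
{
	struct htt htt = { .ring_size = 4, .rx_ops = &rx_ops_32 };
	void *ring = calloc(1, htt.rx_ops->get_rx_ring_size(&htt));

	/* callers never branch on target word size; they go via rx_ops */
	htt.rx_ops->config_paddrs_ring(&htt, ring);
	htt.rx_ops->set_paddrs_ring(&htt, 0xdeadbeefULL, 0);
	printf("slot 0 = 0x%" PRIx32 "\n", htt.paddrs_ring_32[0]);
	htt.rx_ops->reset_paddrs_ring(&htt, 0);
	free(htt.rx_ops->get_vaddr_ring(&htt));
	return 0;
}

ath10k_htt_set_rx_ops() in the patch performs the equivalent of the selection in main() exactly once, keyed off ar->hw_params.target_64bit, so the rx fast path pays only a single indirect call per ring operation instead of branching on the target word size everywhere.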