
Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.16. Major changes:

ath10k

* more preparation work for wcn3990 support

* add memory dump to firmware coredump files

wil6210

* support scheduled scan

* support 40-bit DMA addresses
Kalle Valo, committed 7 years ago
parent
commit a9f894634e
61 files changed with 3700 additions and 713 deletions
  1. drivers/net/wireless/ath/ath10k/Makefile (+1, -0)
  2. drivers/net/wireless/ath/ath10k/ahb.c (+1, -1)
  3. drivers/net/wireless/ath/ath10k/bmi.c (+1, -1)
  4. drivers/net/wireless/ath/ath10k/bmi.h (+1, -1)
  5. drivers/net/wireless/ath/ath10k/ce.c (+542, -94)
  6. drivers/net/wireless/ath/ath10k/ce.h (+53, -7)
  7. drivers/net/wireless/ath/ath10k/core.c (+162, -79)
  8. drivers/net/wireless/ath/ath10k/core.h (+21, -5)
  9. drivers/net/wireless/ath/ath10k/coredump.c (+993, -0)
  10. drivers/net/wireless/ath/ath10k/coredump.h (+225, -0)
  11. drivers/net/wireless/ath/ath10k/debug.c (+1, -276)
  12. drivers/net/wireless/ath/ath10k/debug.h (+2, -17)
  13. drivers/net/wireless/ath/ath10k/debugfs_sta.c (+1, -1)
  14. drivers/net/wireless/ath/ath10k/hif.h (+1, -1)
  15. drivers/net/wireless/ath/ath10k/htc.c (+1, -1)
  16. drivers/net/wireless/ath/ath10k/htc.h (+1, -1)
  17. drivers/net/wireless/ath/ath10k/htt.c (+6, -3)
  18. drivers/net/wireless/ath/ath10k/htt.h (+127, -18)
  19. drivers/net/wireless/ath/ath10k/htt_rx.c (+157, -27)
  20. drivers/net/wireless/ath/ath10k/htt_tx.c (+540, -60)
  21. drivers/net/wireless/ath/ath10k/hw.c (+1, -1)
  22. drivers/net/wireless/ath/ath10k/hw.h (+8, -1)
  23. drivers/net/wireless/ath/ath10k/mac.c (+4, -3)
  24. drivers/net/wireless/ath/ath10k/mac.h (+1, -1)
  25. drivers/net/wireless/ath/ath10k/pci.c (+226, -7)
  26. drivers/net/wireless/ath/ath10k/pci.h (+1, -1)
  27. drivers/net/wireless/ath/ath10k/rx_desc.h (+51, -1)
  28. drivers/net/wireless/ath/ath10k/spectral.c (+1, -1)
  29. drivers/net/wireless/ath/ath10k/spectral.h (+1, -1)
  30. drivers/net/wireless/ath/ath10k/swap.c (+1, -1)
  31. drivers/net/wireless/ath/ath10k/swap.h (+1, -1)
  32. drivers/net/wireless/ath/ath10k/targaddrs.h (+1, -1)
  33. drivers/net/wireless/ath/ath10k/testmode.c (+1, -1)
  34. drivers/net/wireless/ath/ath10k/testmode_i.h (+1, -1)
  35. drivers/net/wireless/ath/ath10k/thermal.c (+1, -1)
  36. drivers/net/wireless/ath/ath10k/thermal.h (+1, -1)
  37. drivers/net/wireless/ath/ath10k/trace.h (+1, -1)
  38. drivers/net/wireless/ath/ath10k/txrx.c (+1, -1)
  39. drivers/net/wireless/ath/ath10k/txrx.h (+1, -1)
  40. drivers/net/wireless/ath/ath10k/wmi-ops.h (+1, -1)
  41. drivers/net/wireless/ath/ath10k/wmi-tlv.c (+1, -3)
  42. drivers/net/wireless/ath/ath10k/wmi-tlv.h (+1, -1)
  43. drivers/net/wireless/ath/ath10k/wmi.c (+1, -1)
  44. drivers/net/wireless/ath/ath10k/wmi.h (+7, -2)
  45. drivers/net/wireless/ath/ath10k/wow.c (+1, -1)
  46. drivers/net/wireless/ath/ath10k/wow.h (+1, -1)
  47. drivers/net/wireless/ath/wcn36xx/smd.c (+1, -1)
  48. drivers/net/wireless/ath/wil6210/cfg80211.c (+66, -2)
  49. drivers/net/wireless/ath/wil6210/debugfs.c (+0, -1)
  50. drivers/net/wireless/ath/wil6210/interrupt.c (+1, -1)
  51. drivers/net/wireless/ath/wil6210/main.c (+56, -11)
  52. drivers/net/wireless/ath/wil6210/netdev.c (+1, -1)
  53. drivers/net/wireless/ath/wil6210/pcie_bus.c (+33, -28)
  54. drivers/net/wireless/ath/wil6210/pm.c (+17, -0)
  55. drivers/net/wireless/ath/wil6210/pmc.c (+6, -5)
  56. drivers/net/wireless/ath/wil6210/txrx.c (+7, -7)
  57. drivers/net/wireless/ath/wil6210/wil6210.h (+13, -4)
  58. drivers/net/wireless/ath/wil6210/wil_crash_dump.c (+11, -0)
  59. drivers/net/wireless/ath/wil6210/wil_platform.h (+14, -1)
  60. drivers/net/wireless/ath/wil6210/wmi.c (+238, -3)
  61. drivers/net/wireless/ath/wil6210/wmi.h (+82, -18)

+ 1 - 0
drivers/net/wireless/ath/ath10k/Makefile

@@ -21,6 +21,7 @@ ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \

+ 1 - 1
drivers/net/wireless/ath/ath10k/ahb.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
  * Copyright (c) 2015 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any

+ 1 - 1
drivers/net/wireless/ath/ath10k/bmi.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/bmi.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 542 - 94
drivers/net/wireless/ath/ath10k/ce.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * Guts of ath10k_ce_send.
  * The caller takes responsibility for any needed locking.
  */
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-			  void *per_transfer_context,
-			  u32 buffer,
-			  unsigned int nbytes,
-			  unsigned int transfer_id,
-			  unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+				  void *per_transfer_context,
+				  dma_addr_t buffer,
+				  unsigned int nbytes,
+				  unsigned int transfer_id,
+				  unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -384,6 +384,87 @@ exit:
 	return ret;
 }
 
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+				     void *per_transfer_context,
+				     dma_addr_t buffer,
+				     unsigned int nbytes,
+				     unsigned int transfer_id,
+				     unsigned int flags)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	struct ce_desc_64 *desc, sdesc;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int write_index = src_ring->write_index;
+	u32 ctrl_addr = ce_state->ctrl_addr;
+	__le32 *addr;
+	u32 desc_flags = 0;
+	int ret = 0;
+
+	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+		return -ESHUTDOWN;
+
+	if (nbytes > ce_state->src_sz_max)
+		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+			    __func__, nbytes, ce_state->src_sz_max);
+
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) <= 0)) {
+		ret = -ENOSR;
+		goto exit;
+	}
+
+	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+				      write_index);
+
+	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+	if (flags & CE_SEND_FLAG_GATHER)
+		desc_flags |= CE_DESC_FLAGS_GATHER;
+
+	if (flags & CE_SEND_FLAG_BYTE_SWAP)
+		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+	addr = (__le32 *)&sdesc.addr;
+
+	flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+	addr[0] = __cpu_to_le32(buffer);
+	addr[1] = __cpu_to_le32(flags);
+	if (flags & CE_SEND_FLAG_GATHER)
+		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+	else
+		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+	sdesc.nbytes = __cpu_to_le16(nbytes);
+	sdesc.flags  = __cpu_to_le16(desc_flags);
+
+	*desc = sdesc;
+
+	src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+	/* Update Source Ring Write Index */
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+	if (!(flags & CE_SEND_FLAG_GATHER))
+		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+	src_ring->write_index = write_index;
+exit:
+	return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+			  void *per_transfer_context,
+			  dma_addr_t buffer,
+			  unsigned int nbytes,
+			  unsigned int transfer_id,
+			  unsigned int flags)
+{
+	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+				    buffer, nbytes, transfer_id, flags);
+}
+
 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 {
 	struct ath10k *ar = pipe->ar;
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   unsigned int transfer_id,
 		   unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
 
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+				   dma_addr_t paddr)
 {
 	struct ath10k *ar = pipe->ar;
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 	return 0;
 }
 
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+				      void *ctx,
+				      dma_addr_t paddr)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+	struct ce_desc_64 *desc =
+			CE_DEST_RING_TO_DESC_64(base, write_index);
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	lockdep_assert_held(&ce->ce_lock);
+
+	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+		return -ENOSPC;
+
+	desc->addr = __cpu_to_le64(paddr);
+	desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+	desc->nbytes = 0;
+
+	dest_ring->per_transfer_context[write_index] = ctx;
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;
+
+	return 0;
+}
+
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 {
 	struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 	dest_ring->write_index = write_index;
 }
 
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr)
 {
 	struct ath10k *ar = pipe->ar;
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	int ret;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
 	spin_unlock_bh(&ce->ce_lock);
 
 	return ret;
@@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  * Guts of ath10k_ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
-					 void **per_transfer_contextp,
-					 unsigned int *nbytesp)
+static int
+	 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					       void **per_transfer_contextp,
+					       unsigned int *nbytesp)
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 	return 0;
 }
 
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_contextp,
+					 unsigned int *nbytesp)
+{
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+	struct ce_desc_64 *desc =
+		CE_DEST_RING_TO_DESC_64(base, sw_index);
+	struct ce_desc_64 sdesc;
+	u16 nbytes;
+
+	/* Copy in one go for performance reasons */
+	sdesc = *desc;
+
+	nbytes = __le16_to_cpu(sdesc.nbytes);
+	if (nbytes == 0) {
+		/* This closes a relatively unusual race where the Host
+		 * sees the updated DRRI before the update to the
+		 * corresponding descriptor has completed. We treat this
+		 * as a descriptor that is not yet done.
+		 */
+		return -EIO;
+	}
+
+	desc->nbytes = 0;
+
+	/* Return data from completed destination descriptor */
+	*nbytesp = nbytes;
+
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			dest_ring->per_transfer_context[sw_index];
+
+	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+	 * So update transfer context all CEs except CE5.
+	 */
+	if (ce_state->id != 5)
+		dest_ring->per_transfer_context[sw_index] = NULL;
+
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	dest_ring->sw_index = sw_index;
+
+	return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+					 void **per_transfer_ctx,
+					 unsigned int *nbytesp)
+{
+	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+							    per_transfer_ctx,
+							    nbytesp);
+}
+
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
 				  unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 	int ret;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
 						   per_transfer_contextp,
 						   nbytesp);
+
 	spin_unlock_bh(&ce->ce_lock);
 
 	return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
-			       void **per_transfer_contextp,
-			       u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+				       void **per_transfer_contextp,
+				       dma_addr_t *bufferp)
 {
 	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 	return ret;
 }
 
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+					  void **per_transfer_contextp,
+					  dma_addr_t *bufferp)
+{
+	struct ath10k_ce_ring *dest_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	int ret;
+	struct ath10k *ar;
+	struct ath10k_ce *ce;
+
+	dest_ring = ce_state->dest_ring;
+
+	if (!dest_ring)
+		return -EIO;
+
+	ar = ce_state->ar;
+	ce = ath10k_ce_priv(ar);
+
+	spin_lock_bh(&ce->ce_lock);
+
+	nentries_mask = dest_ring->nentries_mask;
+	sw_index = dest_ring->sw_index;
+	write_index = dest_ring->write_index;
+	if (write_index != sw_index) {
+		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+		struct ce_desc_64 *desc =
+			CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+		/* Return data from completed destination descriptor */
+		*bufferp = __le64_to_cpu(desc->addr);
+
+		if (per_transfer_contextp)
+			*per_transfer_contextp =
+				dest_ring->per_transfer_context[sw_index];
+
+		/* sanity */
+		dest_ring->per_transfer_context[sw_index] = NULL;
+		desc->nbytes = 0;
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		dest_ring->sw_index = sw_index;
+		ret = 0;
+	} else {
+		ret = -EIO;
+	}
+
+	spin_unlock_bh(&ce->ce_lock);
+
+	return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+			       void **per_transfer_contextp,
+			       dma_addr_t *bufferp)
+{
+	return ce_state->ops->ce_revoke_recv_next(ce_state,
+						  per_transfer_contextp,
+						  bufferp);
+}
+
 /*
  * Guts of ath10k_ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
 	return 0;
 }
 
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+					struct ath10k_ce_ring *src_ring,
+					u32 sw_index,
+					dma_addr_t *bufferp,
+					u32 *nbytesp,
+					u32 *transfer_idp)
+{
+		struct ce_desc *base = src_ring->base_addr_owner_space;
+		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+		/* Return data from completed source descriptor */
+		*bufferp = __le32_to_cpu(desc->addr);
+		*nbytesp = __le16_to_cpu(desc->nbytes);
+		*transfer_idp = MS(__le16_to_cpu(desc->flags),
+				   CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+					   struct ath10k_ce_ring *src_ring,
+					   u32 sw_index,
+					   dma_addr_t *bufferp,
+					   u32 *nbytesp,
+					   u32 *transfer_idp)
+{
+		struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+		struct ce_desc_64 *desc =
+			CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+		/* Return data from completed source descriptor */
+		*bufferp = __le64_to_cpu(desc->addr);
+		*nbytesp = __le16_to_cpu(desc->nbytes);
+		*transfer_idp = MS(__le16_to_cpu(desc->flags),
+				   CE_DESC_FLAGS_META_DATA);
+}
+
 /* NB: Modeled after ath10k_ce_completed_send_next */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp,
+			       dma_addr_t *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp)
 {
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 	write_index = src_ring->write_index;
 
 	if (write_index != sw_index) {
-		struct ce_desc *base = src_ring->base_addr_owner_space;
-		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
-		/* Return data from completed source descriptor */
-		*bufferp = __le32_to_cpu(desc->addr);
-		*nbytesp = __le16_to_cpu(desc->nbytes);
-		*transfer_idp = MS(__le16_to_cpu(desc->flags),
-						CE_DESC_FLAGS_META_DATA);
+		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+						    bufferp, nbytesp,
+						    transfer_idp);
 
 		if (per_transfer_contextp)
 			*per_transfer_contextp =
@@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 
 	nentries = roundup_pow_of_two(attr->src_nentries);
 
-	memset(src_ring->base_addr_owner_space, 0,
-	       nentries * sizeof(struct ce_desc));
+	if (ar->hw_params.target_64bit)
+		memset(src_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc_64));
+	else
+		memset(src_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc));
 
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
@@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 
 	nentries = roundup_pow_of_two(attr->dest_nentries);
 
-	memset(dest_ring->base_addr_owner_space, 0,
-	       nentries * sizeof(struct ce_desc));
+	if (ar->hw_params.target_64bit)
+		memset(dest_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc_64));
+	else
+		memset(dest_ring->base_addr_owner_space, 0,
+		       nentries * sizeof(struct ce_desc));
 
 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
 	dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
 
 	src_ring->base_addr_ce_space_unaligned = base_addr;
 
-	src_ring->base_addr_owner_space = PTR_ALIGN(
-			src_ring->base_addr_owner_space_unaligned,
-			CE_DESC_RING_ALIGN);
-	src_ring->base_addr_ce_space = ALIGN(
-			src_ring->base_addr_ce_space_unaligned,
-			CE_DESC_RING_ALIGN);
+	src_ring->base_addr_owner_space =
+			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	src_ring->base_addr_ce_space =
+			ALIGN(src_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
+
+	return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+			    const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (!src_ring)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	src_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc_64) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!src_ring->base_addr_owner_space_unaligned) {
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	src_ring->base_addr_ce_space_unaligned = base_addr;
+
+	src_ring->base_addr_owner_space =
+			PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	src_ring->base_addr_ce_space =
+			ALIGN(src_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
 
 	return src_ring;
 }
@@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
 
-	dest_ring->base_addr_owner_space = PTR_ALIGN(
-			dest_ring->base_addr_owner_space_unaligned,
-			CE_DESC_RING_ALIGN);
-	dest_ring->base_addr_ce_space = ALIGN(
-			dest_ring->base_addr_ce_space_unaligned,
-			CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_owner_space =
+			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_ce_space =
+				ALIGN(dest_ring->base_addr_ce_space_unaligned,
+				      CE_DESC_RING_ALIGN);
+
+	return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+			     const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *dest_ring;
+	u32 nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (!dest_ring)
+		return ERR_PTR(-ENOMEM);
+
+	dest_ring->nentries = nentries;
+	dest_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	dest_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ar->dev,
+				   (nentries * sizeof(struct ce_desc_64) +
+				    CE_DESC_RING_ALIGN),
+				   &base_addr, GFP_KERNEL);
+	if (!dest_ring->base_addr_owner_space_unaligned) {
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+	/* Correctly initialize memory to 0 to prevent garbage
+	 * data crashing system when download firmware
+	 */
+	memset(dest_ring->base_addr_owner_space_unaligned, 0,
+	       nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+	dest_ring->base_addr_owner_space =
+			PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+				  CE_DESC_RING_ALIGN);
+	dest_ring->base_addr_ce_space =
+			ALIGN(dest_ring->base_addr_ce_space_unaligned,
+			      CE_DESC_RING_ALIGN);
 
 	return dest_ring;
 }
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
 	ath10k_ce_deinit_dest_ring(ar, ce_id);
 }
 
-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-			 const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
-	int ret;
-
-	/*
-	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
-	 * additional TX locking checks.
-	 *
-	 * For the lack of a better place do the check here.
-	 */
-	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
-		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;
 
-	if (attr->src_nentries)
-		ce_state->send_cb = attr->send_cb;
-
-	if (attr->dest_nentries)
-		ce_state->recv_cb = attr->recv_cb;
-
-	if (attr->src_nentries) {
-		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
-		if (IS_ERR(ce_state->src_ring)) {
-			ret = PTR_ERR(ce_state->src_ring);
-			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->src_ring = NULL;
-			return ret;
-		}
+	if (ce_state->src_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->src_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->src_ring->base_addr_owner_space,
+				  ce_state->src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
 	}
 
-	if (attr->dest_nentries) {
-		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
-								attr);
-		if (IS_ERR(ce_state->dest_ring)) {
-			ret = PTR_ERR(ce_state->dest_ring);
-			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
-				   ce_id, ret);
-			ce_state->dest_ring = NULL;
-			return ret;
-		}
+	if (ce_state->dest_ring) {
+		dma_free_coherent(ar->dev,
+				  (ce_state->dest_ring->nentries *
+				   sizeof(struct ce_desc) +
+				   CE_DESC_RING_ALIGN),
+				  ce_state->dest_ring->base_addr_owner_space,
+				  ce_state->dest_ring->base_addr_ce_space);
+		kfree(ce_state->dest_ring);
 	}
 
-	return 0;
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
 }
 
-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
 {
 	struct ath10k_ce *ce = ath10k_ce_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	if (ce_state->src_ring) {
 		dma_free_coherent(ar->dev,
 				  (ce_state->src_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
 				   CE_DESC_RING_ALIGN),
 				  ce_state->src_ring->base_addr_owner_space,
 				  ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	if (ce_state->dest_ring) {
 		dma_free_coherent(ar->dev,
 				  (ce_state->dest_ring->nentries *
-				   sizeof(struct ce_desc) +
+				   sizeof(struct ce_desc_64) +
 				   CE_DESC_RING_ALIGN),
 				  ce_state->dest_ring->base_addr_owner_space,
 				  ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 	ce_state->dest_ring = NULL;
 }
 
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+	ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+
 void ath10k_ce_dump_registers(struct ath10k *ar,
 			      struct ath10k_fw_crash_data *crash_data)
 {
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
 
 	spin_unlock_bh(&ce->ce_lock);
 }
+
+static const struct ath10k_ce_ops ce_ops = {
+	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
+	.ce_free_pipe = _ath10k_ce_free_pipe,
+	.ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+	.ce_completed_recv_next_nolock =
+				_ath10k_ce_completed_recv_next_nolock_64,
+	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+	.ce_free_pipe = _ath10k_ce_free_pipe_64,
+	.ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+			      struct ath10k_ce_pipe *ce_state)
+{
+	switch (ar->hw_rev) {
+	case ATH10K_HW_WCN3990:
+		ce_state->ops = &ce_64_ops;
+		break;
+	default:
+		ce_state->ops = &ce_ops;
+		break;
+	}
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce *ce = ath10k_ce_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+	int ret;
+
+	ath10k_ce_set_ops(ar, ce_state);
+	/* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+	 * additional TX locking checks.
+	 *
+	 * For the lack of a better place do the check here.
+	 */
+	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+
+	if (attr->src_nentries)
+		ce_state->send_cb = attr->send_cb;
+
+	if (attr->dest_nentries)
+		ce_state->recv_cb = attr->recv_cb;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring =
+			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+									ce_id,
+									attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
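The ce.c hunks above change every copy-engine buffer address from u32 to dma_addr_t and add a parallel 64-bit descriptor path (ce_desc_64, CE_DESC_37BIT_ADDR_MASK) for WCN3990, where the new send path splits the address into a low word plus upper bits folded into the flags word. The following is a minimal userspace sketch of the motivation, not kernel code: a DMA address that does not fit in 32 bits is silently truncated by the old u32 signature, while the wider type plus the masked low/high split keeps it intact. The example address and the mask definition here are for illustration only (the mask mirrors GENMASK_ULL(37, 0) from the patch).

/* Userspace sketch only: why the CE API moves from u32 to dma_addr_t. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* bits 0..37, as GENMASK_ULL(37, 0) in the patch above */
#define CE_DESC_37BIT_ADDR_MASK ((1ULL << 38) - 1)

int main(void)
{
	uint64_t paddr = 0x12ABCD1234ULL;        /* example DMA address above 4 GB */

	uint32_t legacy = (uint32_t)paddr;       /* old u32 parameter: upper bits silently lost */
	uint64_t masked = paddr & CE_DESC_37BIT_ADDR_MASK;
	uint32_t lo = (uint32_t)masked;          /* low word written to the 64-bit descriptor */
	uint32_t hi = (uint32_t)(masked >> 32);  /* upper bits carried alongside the flags */

	printf("paddr       = 0x%" PRIx64 "\n", paddr);
	printf("u32 (old)   = 0x%" PRIx32 "  <- truncated\n", legacy);
	printf("lo/hi (new) = 0x%" PRIx32 " / 0x%" PRIx32 "\n", lo, hi);
	return 0;
}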

+ 53 - 7
drivers/net/wireless/ath/ath10k/ce.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK		GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK		GENMASK_ULL(37, 0)
 
 /* Following desc flags are used in QCA99X0 */
 #define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
@@ -50,6 +54,16 @@ struct ce_desc {
 	__le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
+struct ce_desc_64 {
+	__le64 addr;
+	__le16 nbytes; /* length in register map */
+	__le16 flags; /* fw_metadata_high */
+	__le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
 struct ath10k_ce_ring {
 	/* Number of entries in this ring; must be power of 2 */
 	unsigned int nentries;
@@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
 	unsigned int src_sz_max;
 	struct ath10k_ce_ring *src_ring;
 	struct ath10k_ce_ring *dest_ring;
+	const struct ath10k_ce_ops *ops;
 };
 
 /* Copy Engine settable attributes */
@@ -160,7 +175,7 @@ struct ath10k_ce {
  */
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 		   void *per_transfer_send_context,
-		   u32 buffer,
+		   dma_addr_t buffer,
 		   unsigned int nbytes,
 		   /* 14 bits */
 		   unsigned int transfer_id,
@@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 			  void *per_transfer_context,
-			  u32 buffer,
+			  dma_addr_t buffer,
 			  unsigned int nbytes,
 			  unsigned int transfer_id,
 			  unsigned int flags);
@@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 /*==================Recv=======================*/
 
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+			  dma_addr_t paddr);
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
@@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
  */
 int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp);
+			       dma_addr_t *bufferp);
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
@@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       void **per_transfer_contextp,
-			       u32 *bufferp,
+			       dma_addr_t *bufferp,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp);
 
@@ -281,6 +296,31 @@ struct ce_attr {
 	void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
+struct ath10k_ce_ops {
+	struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+						    u32 ce_id,
+						    const struct ce_attr *attr);
+	struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+						    u32 ce_id,
+						    const struct ce_attr *attr);
+	int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+			      dma_addr_t paddr);
+	int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+					     void **per_transfer_contextp,
+					     u32 *nbytesp);
+	int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+				   void **per_transfer_contextp,
+				   dma_addr_t *nbytesp);
+	void (*ce_extract_desc_data)(struct ath10k *ar,
+				     struct ath10k_ce_ring *src_ring,
+				     u32 sw_index, dma_addr_t *bufferp,
+				     u32 *nbytesp, u32 *transfer_idp);
+	void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+	int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+			      void *per_transfer_context,
+			      dma_addr_t buffer, u32 nbytes,
+			      u32 transfer_id, u32 flags);
+};
 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
 	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@@ -292,6 +332,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 #define CE_DEST_RING_TO_DESC(baddr, idx) \
 	(&(((struct ce_desc *)baddr)[idx]))
 
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+	(&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+	(&(((struct ce_desc_64 *)baddr)[idx]))
+
 /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
 #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
 	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
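The ce.h hunk above introduces struct ath10k_ce_ops so each pipe carries a table of descriptor handlers, and ce.c selects ce_64_ops for ATH10K_HW_WCN3990 and ce_ops for everything else. Below is a small self-contained sketch of that dispatch pattern; the types and names are toys, not the driver's real definitions.

/* Toy sketch of the per-hw-rev ops-table dispatch used by ce.c/ce.h above. */
#include <stdio.h>

enum hw_rev { HW_QCA988X, HW_WCN3990 };

struct pipe;

struct ce_ops {
	int (*send_nolock)(struct pipe *p, unsigned long long paddr);
};

struct pipe {
	enum hw_rev rev;
	const struct ce_ops *ops;
};

static int send_32(struct pipe *p, unsigned long long paddr)
{
	(void)p;
	printf("32-bit descriptor path, addr=0x%llx\n", paddr);
	return 0;
}

static int send_64(struct pipe *p, unsigned long long paddr)
{
	(void)p;
	printf("64-bit descriptor path, addr=0x%llx\n", paddr);
	return 0;
}

static const struct ce_ops ops_32 = { .send_nolock = send_32 };
static const struct ce_ops ops_64 = { .send_nolock = send_64 };

static void set_ops(struct pipe *p)
{
	/* mirrors ath10k_ce_set_ops(): WCN3990 gets the 64-bit table */
	p->ops = (p->rev == HW_WCN3990) ? &ops_64 : &ops_32;
}

int main(void)
{
	struct pipe p = { .rev = HW_WCN3990 };

	set_ops(&p);
	return p.ops->send_nolock(&p, 0x12ABCD1234ULL);
}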

+ 162 - 79
drivers/net/wireless/ath/ath10k/core.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@
 #include "htt.h"
 #include "testmode.h"
 #include "wmi-ops.h"
+#include "coredump.h"
 
 unsigned int ath10k_debug_mask;
 static unsigned int ath10k_cryptmode_param;
@@ -39,17 +40,25 @@ static bool uart_print;
 static bool skip_otp;
 static bool rawmode;
 
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
 module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
 MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -78,6 +87,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9887_HW_1_0_VERSION,
@@ -105,6 +116,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -131,6 +144,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -157,6 +172,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_3_0_VERSION,
@@ -183,6 +200,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA6174_HW_3_2_VERSION,
@@ -212,6 +231,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -244,6 +265,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -281,6 +304,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -317,6 +342,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -343,6 +370,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -371,6 +400,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -404,6 +435,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_TLV_NUM_PEERS,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
+		.target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
 	},
 	{
 		.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -422,6 +455,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
 		.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
 		.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+		.target_64bit = true,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
 	},
 };
 
@@ -445,6 +480,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
 	[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
 	[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
 	[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+	[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -1524,8 +1560,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
 		data += ie_len;
 	}
 
-	if (!fw_file->firmware_data ||
-	    !fw_file->firmware_len) {
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+	    (!fw_file->firmware_data || !fw_file->firmware_len)) {
 		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
 			    ar->hw_params.fw.dir, name);
 		ret = -ENOMEDIUM;
@@ -1551,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
 		break;
 	case ATH10K_BUS_PCI:
 	case ATH10K_BUS_AHB:
+	case ATH10K_BUS_SNOC:
 		scnprintf(fw_name, fw_name_len, "%s-%d.bin",
 			  ATH10K_FW_FILE_BASE, fw_api);
 		break;
@@ -1836,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
 
 	mutex_unlock(&ar->conf_mutex);
 
-	ret = ath10k_debug_fw_devcoredump(ar);
+	ret = ath10k_coredump_submit(ar);
 	if (ret)
 		ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
 			    ret);
@@ -2078,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 
 	ar->running_fw = fw;
 
-	ath10k_bmi_start(ar);
-
-	if (ath10k_init_configure_target(ar)) {
-		status = -EINVAL;
-		goto err;
-	}
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->running_fw->fw_file.fw_features)) {
+		ath10k_bmi_start(ar);
 
-	status = ath10k_download_cal_data(ar);
-	if (status)
-		goto err;
+		if (ath10k_init_configure_target(ar)) {
+			status = -EINVAL;
+			goto err;
+		}
 
-	/* Some of of qca988x solutions are having global reset issue
-	 * during target initialization. Bypassing PLL setting before
-	 * downloading firmware and letting the SoC run on REF_CLK is
-	 * fixing the problem. Corresponding firmware change is also needed
-	 * to set the clock source once the target is initialized.
-	 */
-	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-		     ar->running_fw->fw_file.fw_features)) {
-		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
-		if (status) {
-			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
-				   status);
+		status = ath10k_download_cal_data(ar);
+		if (status)
 			goto err;
+
+		/* Some of of qca988x solutions are having global reset issue
+		 * during target initialization. Bypassing PLL setting before
+		 * downloading firmware and letting the SoC run on REF_CLK is
+		 * fixing the problem. Corresponding firmware change is also
+		 * needed to set the clock source once the target is
+		 * initialized.
+		 */
+		if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+			     ar->running_fw->fw_file.fw_features)) {
+			status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+			if (status) {
+				ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+					   status);
+				goto err;
+			}
 		}
-	}
 
-	status = ath10k_download_fw(ar);
-	if (status)
-		goto err;
+		status = ath10k_download_fw(ar);
+		if (status)
+			goto err;
 
-	status = ath10k_init_uart(ar);
-	if (status)
-		goto err;
+		status = ath10k_init_uart(ar);
+		if (status)
+			goto err;
 
-	if (ar->hif.bus == ATH10K_BUS_SDIO)
-		ath10k_init_sdio(ar);
+		if (ar->hif.bus == ATH10K_BUS_SDIO)
+			ath10k_init_sdio(ar);
+	}
 
 	ar->htc.htc_ops.target_send_suspend_complete =
 		ath10k_send_suspend_complete;
@@ -2125,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 		goto err;
 	}
 
-	status = ath10k_bmi_done(ar);
-	if (status)
-		goto err;
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->running_fw->fw_file.fw_features)) {
+		status = ath10k_bmi_done(ar);
+		if (status)
+			goto err;
+	}
 
 	status = ath10k_wmi_attach(ar);
 	if (status) {
@@ -2370,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 		return ret;
 	}
 
-	memset(&target_info, 0, sizeof(target_info));
-	if (ar->hif.bus == ATH10K_BUS_SDIO)
+	switch (ar->hif.bus) {
+	case ATH10K_BUS_SDIO:
+		memset(&target_info, 0, sizeof(target_info));
 		ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
-	else
+		if (ret) {
+			ath10k_err(ar, "could not get target info (%d)\n", ret);
+			goto err_power_down;
+		}
+		ar->target_version = target_info.version;
+		ar->hw->wiphy->hw_version = target_info.version;
+		break;
+	case ATH10K_BUS_PCI:
+	case ATH10K_BUS_AHB:
+	case ATH10K_BUS_USB:
+		memset(&target_info, 0, sizeof(target_info));
 		ret = ath10k_bmi_get_target_info(ar, &target_info);
-	if (ret) {
-		ath10k_err(ar, "could not get target info (%d)\n", ret);
-		goto err_power_down;
+		if (ret) {
+			ath10k_err(ar, "could not get target info (%d)\n", ret);
+			goto err_power_down;
+		}
+		ar->target_version = target_info.version;
+		ar->hw->wiphy->hw_version = target_info.version;
+		break;
+	case ATH10K_BUS_SNOC:
+		break;
+	default:
+		ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
 	}
 
-	ar->target_version = target_info.version;
-	ar->hw->wiphy->hw_version = target_info.version;
-
 	ret = ath10k_init_hw_params(ar);
 	if (ret) {
 		ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2402,37 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
 	ath10k_debug_print_hwfw_info(ar);
 
-	ret = ath10k_core_pre_cal_download(ar);
-	if (ret) {
-		/* pre calibration data download is not necessary
-		 * for all the chipsets. Ignore failures and continue.
-		 */
-		ath10k_dbg(ar, ATH10K_DBG_BOOT,
-			   "could not load pre cal data: %d\n", ret);
-	}
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->normal_mode_fw.fw_file.fw_features)) {
+		ret = ath10k_core_pre_cal_download(ar);
+		if (ret) {
+			/* pre calibration data download is not necessary
+			 * for all the chipsets. Ignore failures and continue.
+			 */
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "could not load pre cal data: %d\n", ret);
+		}
 
-	ret = ath10k_core_get_board_id_from_otp(ar);
-	if (ret && ret != -EOPNOTSUPP) {
-		ath10k_err(ar, "failed to get board id from otp: %d\n",
-			   ret);
-		goto err_free_firmware_files;
-	}
+		ret = ath10k_core_get_board_id_from_otp(ar);
+		if (ret && ret != -EOPNOTSUPP) {
+			ath10k_err(ar, "failed to get board id from otp: %d\n",
+				   ret);
+			goto err_free_firmware_files;
+		}
 
-	ret = ath10k_core_check_smbios(ar);
-	if (ret)
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+		ret = ath10k_core_check_smbios(ar);
+		if (ret)
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
 
-	ret = ath10k_core_check_dt(ar);
-	if (ret)
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+		ret = ath10k_core_check_dt(ar);
+		if (ret)
+			ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
 
-	ret = ath10k_core_fetch_board_file(ar);
-	if (ret) {
-		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
-		goto err_free_firmware_files;
-	}
+		ret = ath10k_core_fetch_board_file(ar);
+		if (ret) {
+			ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+			goto err_free_firmware_files;
+		}
 
-	ath10k_debug_print_board_info(ar);
+		ath10k_debug_print_board_info(ar);
+	}
 
 	ret = ath10k_core_init_firmware_features(ar);
 	if (ret) {
@@ -2441,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 		goto err_free_firmware_files;
 	}
 
-	ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
-	if (ret) {
-		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
-			   ret);
-		goto err_free_firmware_files;
+	if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+		      ar->normal_mode_fw.fw_file.fw_features)) {
+		ret = ath10k_swap_code_seg_init(ar,
+						&ar->normal_mode_fw.fw_file);
+		if (ret) {
+			ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+				   ret);
+			goto err_free_firmware_files;
+		}
 	}
 
 	mutex_lock(&ar->conf_mutex);
@@ -2497,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
 		goto err_release_fw;
 	}
 
+	status = ath10k_coredump_register(ar);
+	if (status) {
+		ath10k_err(ar, "unable to register coredump\n");
+		goto err_unregister_mac;
+	}
+
 	status = ath10k_debug_register(ar);
 	if (status) {
 		ath10k_err(ar, "unable to initialize debugfs\n");
-		goto err_unregister_mac;
+		goto err_unregister_coredump;
 	}
 
 	status = ath10k_spectral_create(ar);
@@ -2523,6 +2596,8 @@ err_spectral_destroy:
 	ath10k_spectral_destroy(ar);
 err_debug_destroy:
 	ath10k_debug_destroy(ar);
+err_unregister_coredump:
+	ath10k_coredump_unregister(ar);
 err_unregister_mac:
 	ath10k_mac_unregister(ar);
 err_release_fw:
@@ -2677,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
 	init_dummy_netdev(&ar->napi_dev);
 
-	ret = ath10k_debug_create(ar);
+	ret = ath10k_coredump_create(ar);
 	if (ret)
 		goto err_free_aux_wq;
 
+	ret = ath10k_debug_create(ar);
+	if (ret)
+		goto err_free_coredump;
+
 	return ar;
 
+err_free_coredump:
+	ath10k_coredump_destroy(ar);
+
 err_free_aux_wq:
 	destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
@@ -2704,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
 	destroy_workqueue(ar->workqueue_aux);
 
 	ath10k_debug_destroy(ar);
+	ath10k_coredump_destroy(ar);
 	ath10k_htt_tx_destroy(&ar->htt);
 	ath10k_wmi_free_host_mem(ar);
 	ath10k_mac_destroy(ar);
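The core.c hunk above adds a read-only coredump_mask module parameter, defaulting to 0x3 so that register and copy-engine dumps are included in the firmware crash file. The snippet below sketches how such a bitfield is interpreted; the bit positions are assumptions for illustration, since the authoritative enum lives in the new coredump.h, which is not shown in full here. Because the parameter is created with mode 0444, it can only be set at module load time, not changed at runtime.

/* Sketch only: assumed bit layout of the coredump_mask bitfield. */
#include <stdio.h>

#define DUMP_REGISTERS (1UL << 0)  /* assumed bit for the register dump */
#define DUMP_CE_DATA   (1UL << 1)  /* assumed bit for copy-engine data */
#define DUMP_RAM_DATA  (1UL << 2)  /* assumed bit for the new memory dump */

int main(void)
{
	unsigned long coredump_mask = 0x3;   /* driver default from core.c above */

	printf("registers: %s\n", (coredump_mask & DUMP_REGISTERS) ? "yes" : "no");
	printf("CE data:   %s\n", (coredump_mask & DUMP_CE_DATA) ? "yes" : "no");
	printf("RAM dump:  %s\n", (coredump_mask & DUMP_RAM_DATA) ? "yes" : "no");
	return 0;
}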

+ 21 - 5
drivers/net/wireless/ath/ath10k/core.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -92,6 +92,7 @@ enum ath10k_bus {
 	ATH10K_BUS_AHB,
 	ATH10K_BUS_SDIO,
 	ATH10K_BUS_USB,
+	ATH10K_BUS_SNOC,
 };
 
 static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -105,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
 		return "sdio";
 	case ATH10K_BUS_USB:
 		return "usb";
+	case ATH10K_BUS_SNOC:
+		return "snoc";
 	}
 
 	return "unknown";
@@ -457,14 +460,17 @@ struct ath10k_ce_crash_hdr {
 	struct ath10k_ce_crash_data entries[];
 };
 
+#define MAX_MEM_DUMP_TYPE	5
+
 /* used for crash-dump storage, protected by data-lock */
 struct ath10k_fw_crash_data {
-	bool crashed_since_read;
-
 	guid_t guid;
 	struct timespec64 timestamp;
 	__le32 registers[REG_DUMP_COUNT_QCA988X];
 	struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+	u8 *ramdump_buf;
+	size_t ramdump_buf_len;
 };
 
 struct ath10k_debug {
@@ -490,8 +496,6 @@ struct ath10k_debug {
 	u32 reg_addr;
 	u32 nf_cal_period;
 	void *cal_data;
-
-	struct ath10k_fw_crash_data *fw_crash_data;
 };
 
 enum ath10k_state {
@@ -616,6 +620,9 @@ enum ath10k_fw_features {
 	/* Firmware allows management tx by reference instead of by value. */
 	ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
 
+	/* Firmware load is done externally, not by bmi */
+	ATH10K_FW_FEATURE_NON_BMI = 19,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -965,6 +972,13 @@ struct ath10k {
 #endif
 
 	u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+	struct {
+		struct ath10k_fw_crash_data *fw_crash_data;
+	} coredump;
+#endif
+
 	struct {
 		/* protected by conf_mutex */
 		struct ath10k_fw_components utf_mode_fw;
@@ -1018,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
 	return false;
 }
 
+extern unsigned long ath10k_coredump_mask;
+
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
 				  enum ath10k_hw_rev hw_rev,

+ 993 - 0
drivers/net/wireless/ath/ath10k/coredump.c

@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A064},
+	{0x40000, 0x400A4},
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+	{0x800, 0x810},
+	{0x820, 0x82C},
+	{0x830, 0x8F4},
+	{0x90C, 0x91C},
+	{0xA14, 0xA18},
+	{0xA84, 0xA94},
+	{0xAA8, 0xAD4},
+	{0xADC, 0xB40},
+	{0x1000, 0x10A4},
+	{0x10BC, 0x111C},
+	{0x1134, 0x1138},
+	{0x1144, 0x114C},
+	{0x1150, 0x115C},
+	{0x1160, 0x1178},
+	{0x1240, 0x1260},
+	{0x2000, 0x207C},
+	{0x3000, 0x3014},
+	{0x4000, 0x4014},
+	{0x5000, 0x5124},
+	{0x6000, 0x6040},
+	{0x6080, 0x60CC},
+	{0x6100, 0x611C},
+	{0x6140, 0x61D8},
+	{0x6200, 0x6238},
+	{0x6240, 0x628C},
+	{0x62C0, 0x62EC},
+	{0x6380, 0x63E8},
+	{0x6400, 0x6440},
+	{0x6480, 0x64CC},
+	{0x6500, 0x651C},
+	{0x6540, 0x6580},
+	{0x6600, 0x6638},
+	{0x6640, 0x668C},
+	{0x66C0, 0x66EC},
+	{0x6780, 0x67E8},
+	{0x7080, 0x708C},
+	{0x70C0, 0x70C8},
+	{0x7400, 0x741C},
+	{0x7440, 0x7454},
+	{0x7800, 0x7818},
+	{0x8000, 0x8004},
+	{0x8010, 0x8064},
+	{0x8080, 0x8084},
+	{0x80A0, 0x80A4},
+	{0x80C0, 0x80C4},
+	{0x80E0, 0x80F4},
+	{0x8100, 0x8104},
+	{0x8110, 0x812C},
+	{0x9000, 0x9004},
+	{0x9800, 0x982C},
+	{0x9830, 0x9838},
+	{0x9840, 0x986C},
+	{0x9870, 0x9898},
+	{0x9A00, 0x9C00},
+	{0xD580, 0xD59C},
+	{0xF000, 0xF0E0},
+	{0xF140, 0xF190},
+	{0xF250, 0xF25C},
+	{0xF260, 0xF268},
+	{0xF26C, 0xF2A8},
+	{0x10008, 0x1000C},
+	{0x10014, 0x10018},
+	{0x1001C, 0x10020},
+	{0x10024, 0x10028},
+	{0x10030, 0x10034},
+	{0x10040, 0x10054},
+	{0x10058, 0x1007C},
+	{0x10080, 0x100C4},
+	{0x100C8, 0x10114},
+	{0x1012C, 0x10130},
+	{0x10138, 0x10144},
+	{0x10200, 0x10220},
+	{0x10230, 0x10250},
+	{0x10260, 0x10280},
+	{0x10290, 0x102B0},
+	{0x102C0, 0x102DC},
+	{0x102E0, 0x102F4},
+	{0x102FC, 0x1037C},
+	{0x10380, 0x10390},
+	{0x10800, 0x10828},
+	{0x10840, 0x10844},
+	{0x10880, 0x10884},
+	{0x108C0, 0x108E8},
+	{0x10900, 0x10928},
+	{0x10940, 0x10944},
+	{0x10980, 0x10984},
+	{0x109C0, 0x109E8},
+	{0x10A00, 0x10A28},
+	{0x10A40, 0x10A50},
+	{0x11000, 0x11028},
+	{0x11030, 0x11034},
+	{0x11038, 0x11068},
+	{0x11070, 0x11074},
+	{0x11078, 0x110A8},
+	{0x110B0, 0x110B4},
+	{0x110B8, 0x110E8},
+	{0x110F0, 0x110F4},
+	{0x110F8, 0x11128},
+	{0x11138, 0x11144},
+	{0x11178, 0x11180},
+	{0x111B8, 0x111C0},
+	{0x111F8, 0x11200},
+	{0x11238, 0x1123C},
+	{0x11270, 0x11274},
+	{0x11278, 0x1127C},
+	{0x112B0, 0x112B4},
+	{0x112B8, 0x112BC},
+	{0x112F0, 0x112F4},
+	{0x112F8, 0x112FC},
+	{0x11338, 0x1133C},
+	{0x11378, 0x1137C},
+	{0x113B8, 0x113BC},
+	{0x113F8, 0x113FC},
+	{0x11438, 0x11440},
+	{0x11478, 0x11480},
+	{0x114B8, 0x114BC},
+	{0x114F8, 0x114FC},
+	{0x11538, 0x1153C},
+	{0x11578, 0x1157C},
+	{0x115B8, 0x115BC},
+	{0x115F8, 0x115FC},
+	{0x11638, 0x1163C},
+	{0x11678, 0x1167C},
+	{0x116B8, 0x116BC},
+	{0x116F8, 0x116FC},
+	{0x11738, 0x1173C},
+	{0x11778, 0x1177C},
+	{0x117B8, 0x117BC},
+	{0x117F8, 0x117FC},
+	{0x17000, 0x1701C},
+	{0x17020, 0x170AC},
+	{0x18000, 0x18050},
+	{0x18054, 0x18074},
+	{0x18080, 0x180D4},
+	{0x180DC, 0x18104},
+	{0x18108, 0x1813C},
+	{0x18144, 0x18148},
+	{0x18168, 0x18174},
+	{0x18178, 0x18180},
+	{0x181C8, 0x181E0},
+	{0x181E4, 0x181E8},
+	{0x181EC, 0x1820C},
+	{0x1825C, 0x18280},
+	{0x18284, 0x18290},
+	{0x18294, 0x182A0},
+	{0x18300, 0x18304},
+	{0x18314, 0x18320},
+	{0x18328, 0x18350},
+	{0x1835C, 0x1836C},
+	{0x18370, 0x18390},
+	{0x18398, 0x183AC},
+	{0x183BC, 0x183D8},
+	{0x183DC, 0x183F4},
+	{0x18400, 0x186F4},
+	{0x186F8, 0x1871C},
+	{0x18720, 0x18790},
+	{0x19800, 0x19830},
+	{0x19834, 0x19840},
+	{0x19880, 0x1989C},
+	{0x198A4, 0x198B0},
+	{0x198BC, 0x19900},
+	{0x19C00, 0x19C88},
+	{0x19D00, 0x19D20},
+	{0x19E00, 0x19E7C},
+	{0x19E80, 0x19E94},
+	{0x19E98, 0x19EAC},
+	{0x19EB0, 0x19EBC},
+	{0x19F70, 0x19F74},
+	{0x19F80, 0x19F8C},
+	{0x19FA0, 0x19FB4},
+	{0x19FC0, 0x19FD8},
+	{0x1A000, 0x1A200},
+	{0x1A204, 0x1A210},
+	{0x1A228, 0x1A22C},
+	{0x1A230, 0x1A248},
+	{0x1A250, 0x1A270},
+	{0x1A280, 0x1A290},
+	{0x1A2A0, 0x1A2A4},
+	{0x1A2C0, 0x1A2EC},
+	{0x1A300, 0x1A3BC},
+	{0x1A3F0, 0x1A3F4},
+	{0x1A3F8, 0x1A434},
+	{0x1A438, 0x1A444},
+	{0x1A448, 0x1A468},
+	{0x1A580, 0x1A58C},
+	{0x1A644, 0x1A654},
+	{0x1A670, 0x1A698},
+	{0x1A6AC, 0x1A6B0},
+	{0x1A6D0, 0x1A6D4},
+	{0x1A6EC, 0x1A70C},
+	{0x1A710, 0x1A738},
+	{0x1A7C0, 0x1A7D0},
+	{0x1A7D4, 0x1A7D8},
+	{0x1A7DC, 0x1A7E4},
+	{0x1A7F0, 0x1A7F8},
+	{0x1A888, 0x1A89C},
+	{0x1A8A8, 0x1A8AC},
+	{0x1A8C0, 0x1A8DC},
+	{0x1A8F0, 0x1A8FC},
+	{0x1AE04, 0x1AE08},
+	{0x1AE18, 0x1AE24},
+	{0x1AF80, 0x1AF8C},
+	{0x1AFA0, 0x1AFB4},
+	{0x1B000, 0x1B200},
+	{0x1B284, 0x1B288},
+	{0x1B2D0, 0x1B2D8},
+	{0x1B2DC, 0x1B2EC},
+	{0x1B300, 0x1B340},
+	{0x1B374, 0x1B378},
+	{0x1B380, 0x1B384},
+	{0x1B388, 0x1B38C},
+	{0x1B404, 0x1B408},
+	{0x1B420, 0x1B428},
+	{0x1B440, 0x1B444},
+	{0x1B448, 0x1B44C},
+	{0x1B450, 0x1B458},
+	{0x1B45C, 0x1B468},
+	{0x1B584, 0x1B58C},
+	{0x1B68C, 0x1B690},
+	{0x1B6AC, 0x1B6B0},
+	{0x1B7F0, 0x1B7F8},
+	{0x1C800, 0x1CC00},
+	{0x1CE00, 0x1CE04},
+	{0x1CF80, 0x1CF84},
+	{0x1D200, 0x1D800},
+	{0x1E000, 0x20014},
+	{0x20100, 0x20124},
+	{0x21400, 0x217A8},
+	{0x21800, 0x21BA8},
+	{0x21C00, 0x21FA8},
+	{0x22000, 0x223A8},
+	{0x22400, 0x227A8},
+	{0x22800, 0x22BA8},
+	{0x22C00, 0x22FA8},
+	{0x23000, 0x233A8},
+	{0x24000, 0x24034},
+	{0x26000, 0x26064},
+	{0x27000, 0x27024},
+	{0x34000, 0x3400C},
+	{0x34400, 0x3445C},
+	{0x34800, 0x3485C},
+	{0x34C00, 0x34C5C},
+	{0x35000, 0x3505C},
+	{0x35400, 0x3545C},
+	{0x35800, 0x3585C},
+	{0x35C00, 0x35C5C},
+	{0x36000, 0x3605C},
+	{0x38000, 0x38064},
+	{0x38070, 0x380E0},
+	{0x3A000, 0x3A074},
+	{0x40000, 0x400A4},
+	{0x80000, 0x8000C},
+	{0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x70000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+
+		/* RTC_SOC_BASE_ADDRESS */
+		.start = 0x0,
+
+		/* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+		.len = 0x800 - 0x0,
+
+		.name = "REG_PART1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+
+		/* STEREO_BASE_ADDRESS */
+		.start = 0x27000,
+
+		/* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+		.len = 0x60000 - 0x27000,
+
+		.name = "REG_PART2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x70000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_AXI,
+		.start = 0xa0000,
+		.len = 0x18000,
+		.name = "AXI",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x800,
+		.len = 0x80020 - 0x800,
+		.name = "REG_TOTAL",
+		.section_table = {
+			.sections = qca6174_hw21_register_sections,
+			.size = ARRAY_SIZE(qca6174_hw21_register_sections),
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x90000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_AXI,
+		.start = 0xa0000,
+		.len = 0x18000,
+		.name = "AXI",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x800,
+		.len = 0x80020 - 0x800,
+		.name = "REG_TOTAL",
+		.section_table = {
+			.sections = qca6174_hw30_register_sections,
+			.size = ARRAY_SIZE(qca6174_hw30_register_sections),
+		},
+	},
+
+	/* IRAM dump must be put last */
+	{
+		.type = ATH10K_MEM_REGION_TYPE_IRAM1,
+		.start = 0x00980000,
+		.len = 0x00080000,
+		.name = "IRAM1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_IRAM2,
+		.start = 0x00a00000,
+		.len = 0x00040000,
+		.name = "IRAM2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+	{
+		.type = ATH10K_MEM_REGION_TYPE_DRAM,
+		.start = 0x400000,
+		.len = 0x50000,
+		.name = "DRAM",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x4000,
+		.len = 0x2000,
+		.name = "REG_PART1",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+	{
+		.type = ATH10K_MEM_REGION_TYPE_REG,
+		.start = 0x8000,
+		.len = 0x58000,
+		.name = "REG_PART2",
+		.section_table = {
+			.sections = NULL,
+			.size = 0,
+		},
+	},
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+	{
+		.hw_id = QCA6174_HW_1_0_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_1_1_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_1_3_VERSION,
+		.region_table = {
+			.regions = qca6174_hw10_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_2_1_VERSION,
+		.region_table = {
+			.regions = qca6174_hw21_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_3_0_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA6174_HW_3_2_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA9377_HW_1_1_DEV_VERSION,
+		.region_table = {
+			.regions = qca6174_hw30_mem_regions,
+			.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+		},
+	},
+	{
+		.hw_id = QCA988X_HW_2_0_VERSION,
+		.region_table = {
+			.regions = qca988x_hw20_mem_regions,
+			.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+		},
+	},
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+	const struct ath10k_hw_mem_layout *hw;
+	const struct ath10k_mem_region *mem_region;
+	size_t size = 0;
+	int i;
+
+	hw = ath10k_coredump_get_mem_layout(ar);
+
+	if (!hw)
+		return 0;
+
+	mem_region = &hw->region_table.regions[0];
+
+	for (i = 0; i < hw->region_table.size; i++) {
+		size += mem_region->len;
+		mem_region++;
+	}
+
+	/* reserve space for the headers */
+	size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+	/* make sure it is aligned to 16 bytes for debug message printout */
+	size = ALIGN(size, 16);
+
+	return size;
+}
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+	int i;
+
+	if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+		return NULL;
+
+	if (WARN_ON(ar->target_version == 0))
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+		if (ar->target_version == hw_mem_layouts[i].hw_id)
+			return &hw_mem_layouts[i];
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return NULL;
+
+	guid_gen(&crash_data->guid);
+	ktime_get_real_ts64(&crash_data->timestamp);
+
+	return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+	struct ath10k_ce_crash_hdr *ce_hdr;
+	struct ath10k_dump_file_data *dump_data;
+	struct ath10k_tlv_dump_data *dump_tlv;
+	size_t hdr_len = sizeof(*dump_data);
+	size_t len, sofar = 0;
+	unsigned char *buf;
+
+	len = hdr_len;
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+			CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+		len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+	sofar += hdr_len;
+
+	/* The file can get large once firmware RAM is included in the dump,
+	 * so use vmalloc.
+	 */
+	buf = vzalloc(len);
+	if (!buf)
+		return NULL;
+
+	spin_lock_bh(&ar->data_lock);
+
+	dump_data = (struct ath10k_dump_file_data *)(buf);
+	strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+		sizeof(dump_data->df_magic));
+	dump_data->len = cpu_to_le32(len);
+
+	dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+	guid_copy(&dump_data->guid, &crash_data->guid);
+	dump_data->chip_id = cpu_to_le32(ar->chip_id);
+	dump_data->bus_type = cpu_to_le32(0);
+	dump_data->target_version = cpu_to_le32(ar->target_version);
+	dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+	dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+	dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+	dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+	dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+	dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+	dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+	dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+	dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+	dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+		sizeof(dump_data->fw_ver));
+
+	dump_data->kernel_ver_code = 0;
+	strlcpy(dump_data->kernel_ver, init_utsname()->release,
+		sizeof(dump_data->kernel_ver));
+
+	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+	dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+		dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+		memcpy(dump_tlv->tlv_data, &crash_data->registers,
+		       sizeof(crash_data->registers));
+		sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+	}
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+		dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+						CE_COUNT * sizeof(ce_hdr->entries[0]));
+		ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+		ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+		memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+		memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+		       CE_COUNT * sizeof(ce_hdr->entries[0]));
+		sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+			CE_COUNT * sizeof(ce_hdr->entries[0]);
+	}
+
+	/* Gather ram dump */
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+		dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+		dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+		dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+		memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+		       crash_data->ramdump_buf_len);
+		sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+
+	return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+	struct ath10k_dump_file_data *dump;
+
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return 0;
+
+	dump = ath10k_coredump_build(ar);
+	if (!dump) {
+		ath10k_warn(ar, "no crash dump data found for devcoredump");
+		return -ENODATA;
+	}
+
+	dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+	return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+	if (ath10k_coredump_mask == 0)
+		/* coredump disabled */
+		return 0;
+
+	ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+	if (!ar->coredump.fw_crash_data)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+		crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+		crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+		if (!crash_data->ramdump_buf)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+	struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+	vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+	if (ar->coredump.fw_crash_data->ramdump_buf) {
+		vfree(ar->coredump.fw_crash_data->ramdump_buf);
+		ar->coredump.fw_crash_data->ramdump_buf = NULL;
+		ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+	}
+
+	vfree(ar->coredump.fw_crash_data);
+	ar->coredump.fw_crash_data = NULL;
+}
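
The buffer sized by ath10k_coredump_get_ramdump_size() is filled by the bus layer (the pci.c changes in this series), not by coredump.c itself. As a rough sketch of that contract, assuming the definitions from coredump.h/core.h and a hypothetical read_target_mem() accessor standing in for the driver's real DIAG read path, a backend walks the region table and prefixes each region with an ath10k_dump_ram_data_hdr:

/* Illustrative only: how a bus backend could fill crash_data->ramdump_buf
 * using the layout returned by ath10k_coredump_get_mem_layout(). The real
 * driver does this in pci.c via its DIAG window; read_target_mem() here is
 * a hypothetical placeholder for that read path.
 */
static void example_dump_ram_regions(struct ath10k *ar,
				     struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *hw;
	const struct ath10k_mem_region *region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u8 *buf = crash_data->ramdump_buf;
	int i;

	hw = ath10k_coredump_get_mem_layout(ar);
	if (!hw)
		return;

	for (i = 0; i < hw->region_table.size; i++) {
		region = &hw->region_table.regions[i];

		/* each region is prefixed with a small header, matching the
		 * space reserved in ath10k_coredump_get_ramdump_size()
		 */
		hdr = (struct ath10k_dump_ram_data_hdr *)buf;
		hdr->region_type = cpu_to_le32(region->type);
		hdr->start = cpu_to_le32(region->start);
		hdr->length = cpu_to_le32(region->len);
		buf += sizeof(*hdr);

		/* hypothetical accessor; a region with a section table would
		 * instead be read section by section, with the gaps filled
		 * with ATH10K_MAGIC_NOT_COPIED
		 */
		read_target_mem(ar, region->start, hdr->data, region->len);
		buf += region->len;
	}
}
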

+ 225 - 0
drivers/net/wireless/ath/ath10k/coredump.h

@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+	ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+	ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+	/* contains multiple struct ath10k_dump_ram_data_hdr */
+	ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+	ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+	/* see ath10k_fw_crash_dump_type above */
+	__le32 type;
+
+	/* in bytes */
+	__le32 tlv_len;
+
+	/* pad to 32-bit boundaries as needed */
+	u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+	/* dump file information */
+
+	/* "ATH10K-FW-DUMP" */
+	char df_magic[16];
+
+	__le32 len;
+
+	/* file dump version */
+	__le32 version;
+
+	/* some info we can get from ath10k struct that might help */
+
+	guid_t guid;
+
+	__le32 chip_id;
+
+	/* 0 for now, in place for later hardware */
+	__le32 bus_type;
+
+	__le32 target_version;
+	__le32 fw_version_major;
+	__le32 fw_version_minor;
+	__le32 fw_version_release;
+	__le32 fw_version_build;
+	__le32 phy_capability;
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+	__le32 ht_cap_info;
+	__le32 vht_cap_info;
+	__le32 num_rf_chains;
+
+	/* firmware version string */
+	char fw_ver[ETHTOOL_FWVERS_LEN];
+
+	/* Kernel related information */
+
+	/* time-of-day stamp */
+	__le64 tv_sec;
+
+	/* time-of-day stamp, nano-seconds */
+	__le64 tv_nsec;
+
+	/* LINUX_VERSION_CODE */
+	__le32 kernel_ver_code;
+
+	/* VERMAGIC_STRING */
+	char kernel_ver[64];
+
+	/* room for growth w/out changing binary format */
+	u8 unused[128];
+
+	/* struct ath10k_tlv_dump_data + more */
+	u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+	/* enum ath10k_mem_region_type */
+	__le32 region_type;
+
+	__le32 start;
+
+	/* length of payload data, not including this header */
+	__le32 length;
+
+	u8 data[0];
+};
+
+/* magic number used to fill holes in a region not covered by any section */
+#define ATH10K_MAGIC_NOT_COPIED		0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+	ATH10K_MEM_REGION_TYPE_REG	= 1,
+	ATH10K_MEM_REGION_TYPE_DRAM	= 2,
+	ATH10K_MEM_REGION_TYPE_AXI	= 3,
+	ATH10K_MEM_REGION_TYPE_IRAM1	= 4,
+	ATH10K_MEM_REGION_TYPE_IRAM2	= 5,
+};
+
+/* Defines a section of a memory region which should be copied. Not all
+ * parts of the memory are safe to copy (some register ranges, for
+ * example), so sections are used to describe exactly what may be read.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values must also
+ * obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise the
+ * dump processing may encounter errors.
+ */
+struct ath10k_mem_section {
+	u32 start;
+	u32 end;
+};
+
+/* One region of a memory layout. If the sections field is NULL the entire
+ * region is copied. If sections is non-NULL only the areas specified in
+ * sections are copied and the rest of the region is filled with
+ * ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+	enum ath10k_mem_region_type type;
+	u32 start;
+	u32 len;
+
+	const char *name;
+
+	struct {
+		const struct ath10k_mem_section *sections;
+		u32 size;
+	} section_table;
+};
+
+/* The memory layout of one hardware version, identified by its hardware
+ * id and split into regions.
+ */
+struct ath10k_hw_mem_layout {
+	u32 hw_id;
+
+	struct {
+		const struct ath10k_mem_region *regions;
+		int size;
+	} region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+	return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
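
Because struct ath10k_dump_file_data and the TLVs that follow it are part of the user space ABI, the devcoredump blob can be decoded offline. Below is a minimal, standalone sketch of walking the ATH10K_FW_CRASH_DUMP_RAM_DATA payload, which is a sequence of ath10k_dump_ram_data_hdr entries each followed by 'length' bytes of target memory; only the 12-byte header layout comes from this header, the mirrored struct and helper names are illustrative:

/* Standalone userspace sketch: iterate the RAM_DATA TLV payload. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ram_data_hdr {
	uint32_t region_type;	/* enum ath10k_mem_region_type, LE */
	uint32_t start;		/* target address, LE */
	uint32_t length;	/* payload bytes following this header, LE */
};

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

void walk_ram_data_tlv(const uint8_t *tlv_data, size_t tlv_len)
{
	size_t off = 0;

	while (off + sizeof(struct ram_data_hdr) <= tlv_len) {
		uint32_t type = get_le32(tlv_data + off);
		uint32_t start = get_le32(tlv_data + off + 4);
		uint32_t len = get_le32(tlv_data + off + 8);

		printf("region type %u start 0x%x len %u\n", type, start, len);

		/* the region payload follows immediately after the header */
		off += sizeof(struct ram_data_hdr) + len;
	}
}
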

+ 1 - 276
drivers/net/wireless/ath/ath10k/debug.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -18,10 +18,8 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
-#include <linux/utsname.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
-#include <linux/devcoredump.h>
 
 #include "core.h"
 #include "debug.h"
@@ -33,86 +31,6 @@
 
 #define ATH10K_DEBUG_CAL_DATA_LEN 12064
 
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
-	ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
-	ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
-
-	ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
-	/* see ath10k_fw_crash_dump_type above */
-	__le32 type;
-
-	/* in bytes */
-	__le32 tlv_len;
-
-	/* pad to 32-bit boundaries as needed */
-	u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
-	/* dump file information */
-
-	/* "ATH10K-FW-DUMP" */
-	char df_magic[16];
-
-	__le32 len;
-
-	/* file dump version */
-	__le32 version;
-
-	/* some info we can get from ath10k struct that might help */
-
-	guid_t guid;
-
-	__le32 chip_id;
-
-	/* 0 for now, in place for later hardware */
-	__le32 bus_type;
-
-	__le32 target_version;
-	__le32 fw_version_major;
-	__le32 fw_version_minor;
-	__le32 fw_version_release;
-	__le32 fw_version_build;
-	__le32 phy_capability;
-	__le32 hw_min_tx_power;
-	__le32 hw_max_tx_power;
-	__le32 ht_cap_info;
-	__le32 vht_cap_info;
-	__le32 num_rf_chains;
-
-	/* firmware version string */
-	char fw_ver[ETHTOOL_FWVERS_LEN];
-
-	/* Kernel related information */
-
-	/* time-of-day stamp */
-	__le64 tv_sec;
-
-	/* time-of-day stamp, nano-seconds */
-	__le64 tv_nsec;
-
-	/* LINUX_VERSION_CODE */
-	__le32 kernel_ver_code;
-
-	/* VERMAGIC_STRING */
-	char kernel_ver[64];
-
-	/* room for growth w/out changing binary format */
-	u8 unused[128];
-
-	/* struct ath10k_tlv_dump_data + more */
-	u8 data[0];
-} __packed;
-
 void ath10k_info(struct ath10k *ar, const char *fmt, ...)
 {
 	struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
 	.llseek = default_llseek,
 };
 
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
-	lockdep_assert_held(&ar->data_lock);
-
-	crash_data->crashed_since_read = true;
-	guid_gen(&crash_data->guid);
-	ktime_get_real_ts64(&crash_data->timestamp);
-
-	return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
-							    bool mark_read)
-{
-	struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-	struct ath10k_ce_crash_hdr *ce_hdr;
-	struct ath10k_dump_file_data *dump_data;
-	struct ath10k_tlv_dump_data *dump_tlv;
-	size_t hdr_len = sizeof(*dump_data);
-	size_t len, sofar = 0;
-	unsigned char *buf;
-
-	len = hdr_len;
-	len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-	len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-		CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-	sofar += hdr_len;
-
-	/* This is going to get big when we start dumping FW RAM and such,
-	 * so go ahead and use vmalloc.
-	 */
-	buf = vzalloc(len);
-	if (!buf)
-		return NULL;
-
-	spin_lock_bh(&ar->data_lock);
-
-	if (!crash_data->crashed_since_read) {
-		spin_unlock_bh(&ar->data_lock);
-		vfree(buf);
-		return NULL;
-	}
-
-	dump_data = (struct ath10k_dump_file_data *)(buf);
-	strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
-		sizeof(dump_data->df_magic));
-	dump_data->len = cpu_to_le32(len);
-
-	dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
-	guid_copy(&dump_data->guid, &crash_data->guid);
-	dump_data->chip_id = cpu_to_le32(ar->chip_id);
-	dump_data->bus_type = cpu_to_le32(0);
-	dump_data->target_version = cpu_to_le32(ar->target_version);
-	dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
-	dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
-	dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
-	dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
-	dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
-	dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
-	dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
-	dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
-	dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
-	dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
-	strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
-		sizeof(dump_data->fw_ver));
-
-	dump_data->kernel_ver_code = 0;
-	strlcpy(dump_data->kernel_ver, init_utsname()->release,
-		sizeof(dump_data->kernel_ver));
-
-	dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
-	dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
-	/* Gather crash-dump */
-	dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-	dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
-	dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
-	memcpy(dump_tlv->tlv_data, &crash_data->registers,
-	       sizeof(crash_data->registers));
-	sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
-	dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-	dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
-	dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
-					CE_COUNT * sizeof(ce_hdr->entries[0]));
-	ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
-	ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
-	memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
-	memcpy(ce_hdr->entries, crash_data->ce_crash_data,
-	       CE_COUNT * sizeof(ce_hdr->entries[0]));
-	sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-		 CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-	ar->debug.fw_crash_data->crashed_since_read = !mark_read;
-
-	spin_unlock_bh(&ar->data_lock);
-
-	return dump_data;
-}
-
-int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-	struct ath10k_dump_file_data *dump;
-	void *dump_ptr;
-	u32 dump_len;
-
-	/* To keep the dump file available also for debugfs don't mark the
-	 * file read, only debugfs should do that.
-	 */
-	dump = ath10k_build_dump_file(ar, false);
-	if (!dump) {
-		ath10k_warn(ar, "no crash dump data found for devcoredump");
-		return -ENODATA;
-	}
-
-	/* Make a copy of the dump file for dev_coredumpv() as during the
-	 * transition period we need to own the original file. Once
-	 * fw_crash_dump debugfs file is removed no need to have a copy
-	 * anymore.
-	 */
-	dump_len = le32_to_cpu(dump->len);
-	dump_ptr = vzalloc(dump_len);
-
-	if (!dump_ptr)
-		return -ENOMEM;
-
-	memcpy(dump_ptr, dump, dump_len);
-
-	dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
-
-	return 0;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
-	struct ath10k *ar = inode->i_private;
-	struct ath10k_dump_file_data *dump;
-
-	ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
-
-	dump = ath10k_build_dump_file(ar, true);
-	if (!dump)
-		return -ENODATA;
-
-	file->private_data = dump;
-
-	return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct ath10k_dump_file_data *dump_file = file->private_data;
-
-	return simple_read_from_buffer(user_buf, count, ppos,
-				       dump_file,
-				       le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
-					struct file *file)
-{
-	vfree(file->private_data);
-
-	return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
-	.open = ath10k_fw_crash_dump_open,
-	.read = ath10k_fw_crash_dump_read,
-	.release = ath10k_fw_crash_dump_release,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
-};
-
 static ssize_t ath10k_reg_addr_read(struct file *file,
 				    char __user *user_buf,
 				    size_t count, loff_t *ppos)
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
 
 int ath10k_debug_create(struct ath10k *ar)
 {
-	ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
-	if (!ar->debug.fw_crash_data)
-		return -ENOMEM;
-
 	ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
 	if (!ar->debug.cal_data)
 		return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
 
 void ath10k_debug_destroy(struct ath10k *ar)
 {
-	vfree(ar->debug.fw_crash_data);
-	ar->debug.fw_crash_data = NULL;
-
 	vfree(ar->debug.cal_data);
 	ar->debug.cal_data = NULL;
 
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
 	debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_simulate_fw_crash);
 
-	debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
-			    &fops_fw_crash_dump);
-
 	debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_reg_addr);
 

+ 2 - 17
drivers/net/wireless/ath/ath10k/debug.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
 	ATH10K_DBG_SDIO_DUMP	= 0x00020000,
 	ATH10K_DBG_USB		= 0x00040000,
 	ATH10K_DBG_USB_BULK	= 0x00080000,
+	ATH10K_DBG_SNOC		= 0x00100000,
 	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
@@ -100,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_debug_tpc_stats_process(struct ath10k *ar,
 				    struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
 
-int ath10k_debug_fw_devcoredump(struct ath10k *ar);
-
 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
 
 void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -173,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
 {
 }
 
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-	return NULL;
-}
-
 static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
 {
 	return 0;
@@ -189,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
 	return 0;
 }
 
-static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-	return 0;
-}
-
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL

+ 1 - 1
drivers/net/wireless/ath/ath10k/debugfs_sta.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/hif.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/htc.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/htc.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 6 - 3
drivers/net/wireless/ath/ath10k/htt.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
 		WARN_ON(1);
 		return -EINVAL;
 	}
+	ath10k_htt_set_tx_ops(htt);
+	ath10k_htt_set_rx_ops(htt);
+
 	return 0;
 }
 
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
 		return status;
 	}
 
-	status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+	status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
 	if (status)
 		return status;
 
-	status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+	status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
 	if (status) {
 		ath10k_warn(ar, "failed to setup rx ring: %d\n",
 			    status);

+ 127 - 18
drivers/net/wireless/ath/ath10k/htt.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
 	struct htt_data_tx_desc_frag frags[6];
 };
 
+struct htt_msdu_ext_desc_64 {
+	__le32 tso_flag[5];
+	__le16 ip_identification;
+	u8 flags;
+	u8 reserved;
+	struct htt_data_tx_desc_frag frags[6];
+};
+
 #define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
 #define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
 #define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
 	u8 prefetch[0]; /* start of frame, for FW classification engine */
 } __packed;
 
+struct htt_data_tx_desc_64 {
+	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+	__le16 len;
+	__le16 id;
+	__le64 frags_paddr;
+	union {
+		__le32 peerid;
+		struct {
+			__le16 peerid;
+			__le16 freq;
+		} __packed offchan_tx;
+	} __packed;
+	u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
 enum htt_rx_ring_flags {
 	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
 	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
 
 #define HTT_RX_RING_SIZE_MIN 128
 #define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
 
-struct htt_rx_ring_setup_ring {
+struct htt_rx_ring_setup_ring32 {
 	__le32 fw_idx_shadow_reg_paddr;
 	__le32 rx_ring_base_paddr;
 	__le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring {
 	__le16 frag_info_offset;
 } __packed;
 
+struct htt_rx_ring_setup_ring64 {
+	__le64 fw_idx_shadow_reg_paddr;
+	__le64 rx_ring_base_paddr;
+	__le16 rx_ring_len; /* in 4-byte words */
+	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
+	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
+	__le16 fw_idx_init_val;
+
+	/* the following offsets are in 4-byte units */
+	__le16 mac80211_hdr_offset;
+	__le16 msdu_payload_offset;
+	__le16 ppdu_start_offset;
+	__le16 ppdu_end_offset;
+	__le16 mpdu_start_offset;
+	__le16 mpdu_end_offset;
+	__le16 msdu_start_offset;
+	__le16 msdu_end_offset;
+	__le16 rx_attention_offset;
+	__le16 frag_info_offset;
+} __packed;
+
 struct htt_rx_ring_setup_hdr {
 	u8 num_rings; /* supported values: 1, 2 */
 	__le16 rsvd0;
 } __packed;
 
-struct htt_rx_ring_setup {
+struct htt_rx_ring_setup_32 {
+	struct htt_rx_ring_setup_hdr hdr;
+	struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
 	struct htt_rx_ring_setup_hdr hdr;
-	struct htt_rx_ring_setup_ring rings[0];
+	struct htt_rx_ring_setup_ring64 rings[0];
 } __packed;
 
 /*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
 	u8 reserved;
 } __packed;
 
+struct htt_rx_in_ord_msdu_desc_ext {
+	__le64 msdu_paddr;
+	__le16 msdu_len;
+	u8 fw_desc;
+	u8 reserved;
+} __packed;
+
 struct htt_rx_in_ord_ind {
 	u8 info;
 	__le16 peer_id;
 	u8 vdev_id;
 	u8 reserved;
 	__le16 msdu_count;
-	struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+	union {
+		struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+		struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+	} __packed;
 } __packed;
 
 #define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
 	u8 pad[2];
 } __packed;
 
-struct htt_frag_desc_bank_cfg {
+struct htt_frag_desc_bank_cfg32 {
 	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
 	u8 num_banks;
 	u8 desc_size;
@@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg {
 	struct htt_q_state_conf q_state;
 } __packed;
 
+struct htt_frag_desc_bank_cfg64 {
+	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+	u8 num_banks;
+	u8 desc_size;
+	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+	struct htt_q_state_conf q_state;
+} __packed;
+
 #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
@@ -1531,11 +1603,13 @@ struct htt_cmd {
 		struct htt_ver_req ver_req;
 		struct htt_mgmt_tx_desc mgmt_tx;
 		struct htt_data_tx_desc data_tx;
-		struct htt_rx_ring_setup rx_setup;
+		struct htt_rx_ring_setup_32 rx_setup_32;
+		struct htt_rx_ring_setup_64 rx_setup_64;
 		struct htt_stats_req stats_req;
 		struct htt_oob_sync_req oob_sync_req;
 		struct htt_aggr_conf aggr_conf;
-		struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+		struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+		struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
 		struct htt_tx_fetch_resp tx_fetch_resp;
 	};
 } __packed;
@@ -1593,13 +1667,20 @@ struct htt_peer_unmap_event {
 	u16 peer_id;
 };
 
-struct ath10k_htt_txbuf {
+struct ath10k_htt_txbuf_32 {
 	struct htt_data_tx_desc_frag frags[2];
 	struct ath10k_htc_hdr htc_hdr;
 	struct htt_cmd_hdr cmd_hdr;
 	struct htt_data_tx_desc cmd_tx;
 } __packed;
 
+struct ath10k_htt_txbuf_64 {
+	struct htt_data_tx_desc_frag frags[2];
+	struct ath10k_htc_hdr htc_hdr;
+	struct htt_cmd_hdr cmd_hdr;
+	struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
 struct ath10k_htt {
 	struct ath10k *ar;
 	enum ath10k_htc_ep_id eid;
@@ -1644,7 +1725,10 @@ struct ath10k_htt {
 		 * rx buffers the host SW provides for the MAC HW to
 		 * fill.
 		 */
-		__le32 *paddrs_ring;
+		union {
+			__le64 *paddrs_ring_64;
+			__le32 *paddrs_ring_32;
+		};
 
 		/*
 		 * Base address of ring, as a "physical" device address
@@ -1721,12 +1805,20 @@ struct ath10k_htt {
 
 	struct {
 		dma_addr_t paddr;
-		struct htt_msdu_ext_desc *vaddr;
+		union {
+			struct htt_msdu_ext_desc *vaddr_desc_32;
+			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+		};
+		size_t size;
 	} frag_desc;
 
 	struct {
 		dma_addr_t paddr;
-		struct ath10k_htt_txbuf *vaddr;
+		union {
+			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+		};
+		size_t size;
 	} txbuf;
 
 	struct {
@@ -1741,8 +1833,29 @@ struct ath10k_htt {
 	} tx_q_state;
 
 	bool tx_mem_allocated;
+	const struct ath10k_htt_tx_ops *tx_ops;
+	const struct ath10k_htt_rx_ops *rx_ops;
 };
 
+struct ath10k_htt_tx_ops {
+	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+		      struct sk_buff *msdu);
+	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+	void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+struct ath10k_htt_rx_ops {
+	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+				    int idx);
+	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
+};
 #define RX_HTT_HDR_STATUS_LEN 64
 
 /* This structure layout is programmed via rx ring setup
@@ -1820,8 +1933,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_ampdu,
 				u8 max_subfrms_amsdu);
@@ -1846,11 +1957,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
-int ath10k_htt_tx(struct ath10k_htt *htt,
-		  enum ath10k_hw_txrx_mode txmode,
-		  struct sk_buff *msdu);
 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 					     struct sk_buff *skb);
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
-
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
 #endif

+ 157 - 27
drivers/net/wireless/ath/ath10k/htt_rx.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
 
 #include <linux/log2.h>
 
-#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
-#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
-
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
@@ -36,7 +33,7 @@
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 
 static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
 {
 	struct ath10k_skb_rxcb *rxcb;
 
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
 	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
 }
 
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+					     void *vaddr)
+{
+	htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+					  dma_addr_t paddr, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+	htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+	return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 {
 	struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 		rxcb = ATH10K_SKB_RXCB(skb);
 		rxcb->paddr = paddr;
 		htt->rx_ring.netbufs_ring[idx] = skb;
-		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
 		htt->rx_ring.fill_cnt++;
 
 		if (htt->rx_ring.in_ord_rx) {
 			hash_add(htt->rx_ring.skb_table,
 				 &ATH10K_SKB_RXCB(skb)->hlist,
-				 (u32)paddr);
+				 paddr);
 		}
 
 		num--;
@@ -234,9 +285,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 	ath10k_htt_rx_ring_free(htt);
 
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  htt->rx_ops->htt_get_vaddr_ring(htt),
 			  htt->rx_ring.base_paddr);
 
 	dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
 	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 	msdu = htt->rx_ring.netbufs_ring[idx];
 	htt->rx_ring.netbufs_ring[idx] = NULL;
-	htt->rx_ring.paddrs_ring[idx] = 0;
+	htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
 
 	idx++;
 	idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 }
 
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
-					       u32 paddr)
+					       u64 paddr)
 {
 	struct ath10k *ar = htt->ar;
 	struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
 	return msdu;
 }
 
-static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
-					struct htt_rx_in_ord_ind *ev,
-					struct sk_buff_head *list)
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+					  struct htt_rx_in_ord_ind *ev,
+					  struct sk_buff_head *list)
 {
 	struct ath10k *ar = htt->ar;
-	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
 	struct htt_rx_desc *rxd;
 	struct sk_buff *msdu;
 	int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
 	return 0;
 }
 
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+					  struct htt_rx_in_ord_ind *ev,
+					  struct sk_buff_head *list)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+	struct htt_rx_desc *rxd;
+	struct sk_buff *msdu;
+	int msdu_count;
+	bool is_offload;
+	u64 paddr;
+
+	lockdep_assert_held(&htt->rx_ring.lock);
+
+	msdu_count = __le16_to_cpu(ev->msdu_count);
+	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+	while (msdu_count--) {
+		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+		if (!msdu) {
+			__skb_queue_purge(list);
+			return -ENOENT;
+		}
+
+		__skb_queue_tail(list, msdu);
+
+		if (!is_offload) {
+			rxd = (void *)msdu->data;
+
+			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+			skb_put(msdu, sizeof(*rxd));
+			skb_pull(msdu, sizeof(*rxd));
+			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+			if (!(__le32_to_cpu(rxd->attention.flags) &
+			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
+				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+				return -EIO;
+			}
+		}
+
+		msdu_desc++;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	dma_addr_t paddr;
-	void *vaddr;
+	void *vaddr, *vaddr_ring;
 	size_t size;
 	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
 
@@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	 */
 	htt->rx_ring.size = HTT_RX_RING_SIZE;
 	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
-	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
 
 	if (!is_power_of_2(htt->rx_ring.size)) {
 		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;
 
-	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+	size = htt->rx_ops->htt_get_rx_ring_size(htt);
 
-	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
-	if (!vaddr)
+	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+	if (!vaddr_ring)
 		goto err_dma_ring;
 
-	htt->rx_ring.paddrs_ring = vaddr;
+	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
 	htt->rx_ring.base_paddr = paddr;
 
 	vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 err_dma_idx:
 	dma_free_coherent(htt->ar->dev,
-			  (htt->rx_ring.size *
-			   sizeof(htt->rx_ring.paddrs_ring)),
-			  htt->rx_ring.paddrs_ring,
+			  htt->rx_ops->htt_get_rx_ring_size(htt),
+			  vaddr_ring,
 			  htt->rx_ring.base_paddr);
 err_dma_ring:
 	kfree(htt->rx_ring.netbufs_ring);
@@ -1986,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
 		   vdev_id, peer_id, tid, offload, frag, msdu_count);
 
-	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
 		ath10k_warn(ar, "dropping invalid in order rx indication\n");
 		return -EINVAL;
 	}
@@ -1995,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	 * extracted and processed.
 	 */
 	__skb_queue_head_init(&list);
-	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+	if (ar->hw_params.target_64bit)
+		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+						     &list);
+	else
+		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+						     &list);
+
 	if (ret < 0) {
 		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
 		htt->rx_confused = true;
@@ -2795,3 +2899,29 @@ exit:
 	return done;
 }
 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	if (ar->hw_params.target_64bit)
+		htt->rx_ops = &htt_rx_ops_64;
+	else
+		htt->rx_ops = &htt_rx_ops_32;
+}

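The htt_rx.c changes above stop open-coding the 32-bit paddr ring and instead go through htt->rx_ops, an ops table picked once per device from hw_params.target_64bit. Below is a minimal, standalone C sketch of that dispatch pattern; the names (toy_htt, toy_rx_ops, ring_size_32/64) are invented for illustration and are not the driver's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_htt;

/* Per-target ops table; only the ring-size helper is shown here. */
struct toy_rx_ops {
	size_t (*get_rx_ring_size)(const struct toy_htt *htt);
};

struct toy_htt {
	int ring_entries;
	const struct toy_rx_ops *rx_ops;
};

/* 32-bit targets store one 32-bit physical address per ring entry. */
static size_t ring_size_32(const struct toy_htt *htt)
{
	return htt->ring_entries * sizeof(uint32_t);
}

/* 64-bit-capable targets need a 64-bit address per entry. */
static size_t ring_size_64(const struct toy_htt *htt)
{
	return htt->ring_entries * sizeof(uint64_t);
}

static const struct toy_rx_ops rx_ops_32 = { .get_rx_ring_size = ring_size_32 };
static const struct toy_rx_ops rx_ops_64 = { .get_rx_ring_size = ring_size_64 };

int main(void)
{
	struct toy_htt htt = { .ring_entries = 512 };
	int target_64bit = 1;	/* in the driver this comes from hw_params */

	htt.rx_ops = target_64bit ? &rx_ops_64 : &rx_ops_32;
	printf("rx ring needs %zu bytes\n", htt.rx_ops->get_rx_ring_size(&htt));
	return 0;
}
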
+ 540 - 60
drivers/net/wireless/ath/ath10k/htt_tx.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 	idr_remove(&htt->pending_tx, msdu_id);
 }
 
-static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	if (!htt->txbuf.vaddr)
+	if (!htt->txbuf.vaddr_txbuff_32)
 		return;
 
-	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
-	htt->txbuf.vaddr = NULL;
+	size = htt->txbuf.size;
+	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+			  htt->txbuf.paddr);
+	htt->txbuf.vaddr_txbuff_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
-					      GFP_KERNEL);
-	if (!htt->txbuf.vaddr)
+	size = htt->max_num_pending_tx *
+			sizeof(struct ath10k_htt_txbuf_32);
+
+	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+							&htt->txbuf.paddr,
+							GFP_KERNEL);
+	if (!htt->txbuf.vaddr_txbuff_32)
 		return -ENOMEM;
 
+	htt->txbuf.size = size;
+
 	return 0;
 }
 
-static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
 {
+	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	if (!htt->frag_desc.vaddr)
+	if (!htt->txbuf.vaddr_txbuff_64)
 		return;
 
-	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	size = htt->txbuf.size;
+	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+			  htt->txbuf.paddr);
+	htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct ath10k_htt_txbuf_64);
+
+	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+							&htt->txbuf.paddr,
+							GFP_KERNEL);
+	if (!htt->txbuf.vaddr_txbuff_64)
+		return -ENOMEM;
+
+	htt->txbuf.size = size;
+
+	return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+	size_t size;
+
+	if (!htt->frag_desc.vaddr_desc_32)
+		return;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc);
 
 	dma_free_coherent(htt->ar->dev,
 			  size,
-			  htt->frag_desc.vaddr,
+			  htt->frag_desc.vaddr_desc_32,
 			  htt->frag_desc.paddr);
-	htt->frag_desc.vaddr = NULL;
+
+	htt->frag_desc.vaddr_desc_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
 	if (!ar->hw_params.continuous_frag_desc)
 		return 0;
 
-	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
-	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
-						  &htt->frag_desc.paddr,
-						  GFP_KERNEL);
-	if (!htt->frag_desc.vaddr)
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+							  &htt->frag_desc.paddr,
+							  GFP_KERNEL);
+	if (!htt->frag_desc.vaddr_desc_32) {
+		ath10k_err(ar, "failed to alloc fragment desc memory\n");
 		return -ENOMEM;
+	}
+	htt->frag_desc.size = size;
+
+	return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+	size_t size;
+
+	if (!htt->frag_desc.vaddr_desc_64)
+		return;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc_64);
+
+	dma_free_coherent(htt->ar->dev,
+			  size,
+			  htt->frag_desc.vaddr_desc_64,
+			  htt->frag_desc.paddr);
+
+	htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	size_t size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	size = htt->max_num_pending_tx *
+			sizeof(struct htt_msdu_ext_desc_64);
+
+	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+							  &htt->frag_desc.paddr,
+							  GFP_KERNEL);
+	if (!htt->frag_desc.vaddr_desc_64) {
+		ath10k_err(ar, "failed to alloc fragment desc memory\n");
+		return -ENOMEM;
+	}
+	htt->frag_desc.size = size;
 
 	return 0;
 }
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
 	struct ath10k *ar = htt->ar;
 	int ret;
 
-	ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+	ret = htt->tx_ops->htt_alloc_txbuff(htt);
 	if (ret) {
 		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
 		return ret;
 	}
 
-	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+	ret = htt->tx_ops->htt_alloc_frag_desc(htt);
 	if (ret) {
 		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
 		goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
 	ath10k_htt_tx_free_txq(htt);
 
 free_frag_desc:
-	ath10k_htt_tx_free_cont_frag_desc(htt);
+	htt->tx_ops->htt_free_frag_desc(htt);
 
 free_txbuf:
-	ath10k_htt_tx_free_cont_txbuf(htt);
+	htt->tx_ops->htt_free_txbuff(htt);
 
 	return ret;
 }
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
 	if (!htt->tx_mem_allocated)
 		return;
 
-	ath10k_htt_tx_free_cont_txbuf(htt);
+	htt->tx_ops->htt_free_txbuff(htt);
 	ath10k_htt_tx_free_txq(htt);
-	ath10k_htt_tx_free_cont_frag_desc(htt);
+	htt->tx_ops->htt_free_frag_desc(htt);
 	ath10k_htt_tx_free_txdone_fifo(htt);
 	htt->tx_mem_allocated = false;
 }
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
 	return 0;
 }
 
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
-	struct htt_frag_desc_bank_cfg *cfg;
+	struct htt_frag_desc_bank_cfg32 *cfg;
 	int ret, size;
 	u8 info;
 
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 		return -EINVAL;
 	}
 
-	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
 	skb = ath10k_htc_alloc_skb(ar, size);
 	if (!skb)
 		return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 		     ar->running_fw->fw_file.fw_features))
 		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 
-	cfg = &cmd->frag_desc_bank_cfg;
+	cfg = &cmd->frag_desc_bank_cfg32;
 	cfg->info = info;
 	cfg->num_banks = 1;
 	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
 	return 0;
 }
 
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 	struct sk_buff *skb;
 	struct htt_cmd *cmd;
-	struct htt_rx_ring_setup_ring *ring;
+	struct htt_frag_desc_bank_cfg64 *cfg;
+	int ret, size;
+	u8 info;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	if (!htt->frag_desc.paddr) {
+		ath10k_warn(ar, "invalid frag desc memory\n");
+		return -EINVAL;
+	}
+
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+	skb = ath10k_htc_alloc_skb(ar, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+	info = 0;
+	info |= SM(htt->tx_q_state.type,
+		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		     ar->running_fw->fw_file.fw_features))
+		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+	cfg = &cmd->frag_desc_bank_cfg64;
+	cfg->info = info;
+	cfg->num_banks = 1;
+	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+	cfg->bank_base_addrs[0] =  __cpu_to_le64(htt->frag_desc.paddr);
+	cfg->bank_id[0].bank_min_id = 0;
+	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+						    1);
+
+	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+	struct htt_rx_ring_setup_ring32 *ring =
+			(struct htt_rx_ring_setup_ring32 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+	struct htt_rx_ring_setup_ring64 *ring =
+			(struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	struct htt_rx_ring_setup_ring32 *ring;
 	const int num_rx_ring = 1;
 	u16 flags;
 	u32 fw_idx;
@@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
 	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 
-	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
 	    + (sizeof(*ring) * num_rx_ring);
 	skb = ath10k_htc_alloc_skb(ar, len);
 	if (!skb)
@@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	skb_put(skb, len);
 
 	cmd = (struct htt_cmd *)skb->data;
-	ring = &cmd->rx_setup.rings[0];
+	ring = &cmd->rx_setup_32.rings[0];
 
 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
-	cmd->rx_setup.hdr.num_rings = 1;
+	cmd->rx_setup_32.hdr.num_rings = 1;
 
 	/* FIXME: do we need all of this? */
 	flags = 0;
@@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 	ring->flags = __cpu_to_le16(flags);
 	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
 
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+	ath10k_htt_fill_rx_desc_offset_32(ring);
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
 
-	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
-	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
-	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
-	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
-	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
-	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
-	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
-	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
-	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
-	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+	return 0;
+}
 
-#undef desc_offset
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	struct htt_rx_ring_setup_ring64 *ring;
+	const int num_rx_ring = 1;
+	u16 flags;
+	u32 fw_idx;
+	int len;
+	int ret;
+
+	/* HW expects the buffer to be an integral number of 4-byte
+	 * "words"
+	 */
+	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+	    + (sizeof(*ring) * num_rx_ring);
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+
+	cmd = (struct htt_cmd *)skb->data;
+	ring = &cmd->rx_setup_64.rings[0];
+
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+	cmd->rx_setup_64.hdr.num_rings = 1;
+
+	flags = 0;
+	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+	flags |= HTT_RX_RING_FLAGS_PPDU_START;
+	flags |= HTT_RX_RING_FLAGS_PPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MPDU_START;
+	flags |= HTT_RX_RING_FLAGS_MPDU_END;
+	flags |= HTT_RX_RING_FLAGS_MSDU_START;
+	flags |= HTT_RX_RING_FLAGS_MSDU_END;
+	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+	flags |= HTT_RX_RING_FLAGS_NULL_RX;
+	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
 
+	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+	ring->flags = __cpu_to_le16(flags);
+	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+	ath10k_htt_fill_rx_desc_offset_64(ring);
 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
 	if (ret) {
 		dev_kfree_skb_any(skb);
@@ -895,8 +1136,9 @@ err:
 	return res;
 }
 
-int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
-		  struct sk_buff *msdu)
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+			    enum ath10k_hw_txrx_mode txmode,
+			    struct sk_buff *msdu)
 {
 	struct ath10k *ar = htt->ar;
 	struct device *dev = ar->dev;
@@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
 	struct ath10k_hif_sg_item sg_items[2];
-	struct ath10k_htt_txbuf *txbuf;
+	struct ath10k_htt_txbuf_32 *txbuf;
 	struct htt_data_tx_desc_frag *frags;
 	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
 	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	u32 frags_paddr = 0;
 	u32 txbuf_paddr;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
+	struct htt_msdu_ext_desc *ext_desc_t = NULL;
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	txbuf = &htt->txbuf.vaddr[msdu_id];
+	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
 	txbuf_paddr = htt->txbuf.paddr +
-		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
 
 	if ((ieee80211_is_action(hdr->frame_control) ||
 	     ieee80211_is_deauth(hdr->frame_control) ||
@@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 		/* pass through */
 	case ATH10K_HW_TXRX_ETHERNET:
 		if (ar->hw_params.continuous_frag_desc) {
-			memset(&htt->frag_desc.vaddr[msdu_id], 0,
+			ext_desc_t = htt->frag_desc.vaddr_desc_32;
+			memset(&ext_desc_t[msdu_id], 0,
 			       sizeof(struct htt_msdu_ext_desc));
 			frags = (struct htt_data_tx_desc_frag *)
-				&htt->frag_desc.vaddr[msdu_id].frags;
-			ext_desc = &htt->frag_desc.vaddr[msdu_id];
+				&ext_desc_t[msdu_id].frags;
+			ext_desc = &ext_desc_t[msdu_id];
 			frags[0].tword_addr.paddr_lo =
 				__cpu_to_le32(skb_cb->paddr);
 			frags[0].tword_addr.paddr_hi = 0;
@@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 
 	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
-		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
-		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
-		   (u32)skb_cb->paddr, vdev_id, tid, freq);
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+		   &skb_cb->paddr, vdev_id, tid, freq);
 	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
 			msdu->data, msdu->len);
 	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -1093,3 +1337,239 @@ err_free_msdu_id:
 err:
 	return res;
 }
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+			    enum ath10k_hw_txrx_mode txmode,
+			    struct sk_buff *msdu)
+{
+	struct ath10k *ar = htt->ar;
+	struct device *dev = ar->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+	struct ath10k_hif_sg_item sg_items[2];
+	struct ath10k_htt_txbuf_64 *txbuf;
+	struct htt_data_tx_desc_frag *frags;
+	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+	int prefetch_len;
+	int res;
+	u8 flags0 = 0;
+	u16 msdu_id, flags1 = 0;
+	u16 freq = 0;
+	dma_addr_t frags_paddr = 0;
+	u32 txbuf_paddr;
+	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+	spin_lock_bh(&htt->tx_lock);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
+	if (res < 0)
+		goto err;
+
+	msdu_id = res;
+
+	prefetch_len = min(htt->prefetch_len, msdu->len);
+	prefetch_len = roundup(prefetch_len, 4);
+
+	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+	txbuf_paddr = htt->txbuf.paddr +
+		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+		   txmode == ATH10K_HW_TXRX_RAW &&
+		   ieee80211_has_protected(hdr->frame_control)) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
+
+	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+				       DMA_TO_DEVICE);
+	res = dma_mapping_error(dev, skb_cb->paddr);
+	if (res) {
+		res = -EIO;
+		goto err_free_msdu_id;
+	}
+
+	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+		freq = ar->scan.roc_freq;
+
+	switch (txmode) {
+	case ATH10K_HW_TXRX_RAW:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+		/* pass through */
+	case ATH10K_HW_TXRX_ETHERNET:
+		if (ar->hw_params.continuous_frag_desc) {
+			ext_desc_t = htt->frag_desc.vaddr_desc_64;
+			memset(&ext_desc_t[msdu_id], 0,
+			       sizeof(struct htt_msdu_ext_desc_64));
+			frags = (struct htt_data_tx_desc_frag *)
+				&ext_desc_t[msdu_id].frags;
+			ext_desc = &ext_desc_t[msdu_id];
+			frags[0].tword_addr.paddr_lo =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi =
+				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+			frags_paddr =  htt->frag_desc.paddr +
+			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+		} else {
+			frags = txbuf->frags;
+			frags[0].tword_addr.paddr_lo =
+						__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi =
+				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+			frags[1].tword_addr.paddr_lo = 0;
+			frags[1].tword_addr.paddr_hi = 0;
+			frags[1].tword_addr.len_16 = 0;
+		}
+		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		break;
+	case ATH10K_HW_TXRX_MGMT:
+		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+		frags_paddr = skb_cb->paddr;
+		break;
+	}
+
+	/* Normally all commands go through HTC which manages tx credits for
+	 * each endpoint and notifies when tx is completed.
+	 *
+	 * HTT endpoint is creditless so there's no need to care about HTC
+	 * flags. In that case it is trivial to fill the HTC header here.
+	 *
+	 * MSDU transmission is considered completed upon HTT event. This
+	 * implies no relevant resources can be freed until after the event is
+	 * received. That's why HTC tx completion handler itself is ignored by
+	 * setting NULL to transfer_context for all sg items.
+	 *
+	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
+	 * as it's a waste of resources. By bypassing HTC it is possible to
+	 * avoid extra memory allocations, compress data structures and thus
+	 * improve performance.
+	 */
+
+	txbuf->htc_hdr.eid = htt->eid;
+	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+					   sizeof(txbuf->cmd_tx) +
+					   prefetch_len);
+	txbuf->htc_hdr.flags = 0;
+
+	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+		if (ar->hw_params.continuous_frag_desc)
+			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+	}
+
+	/* Prevent firmware from sending up tx inspection requests. There's
+	 * nothing ath10k can do with frames requested for inspection so force
+	 * it to simply rely on a regular tx completion with discard status.
+	 */
+	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+	txbuf->cmd_tx.flags0 = flags0;
+	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+	/* fill fragment descriptor */
+	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+	if (ath10k_mac_tx_frm_has_freq(ar)) {
+		txbuf->cmd_tx.offchan_tx.peerid =
+				__cpu_to_le16(HTT_INVALID_PEERID);
+		txbuf->cmd_tx.offchan_tx.freq =
+				__cpu_to_le16(freq);
+	} else {
+		txbuf->cmd_tx.peerid =
+				__cpu_to_le32(HTT_INVALID_PEERID);
+	}
+
+	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+		   &skb_cb->paddr, vdev_id, tid, freq);
+	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+			msdu->data, msdu->len);
+	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+	sg_items[0].transfer_id = 0;
+	sg_items[0].transfer_context = NULL;
+	sg_items[0].vaddr = &txbuf->htc_hdr;
+	sg_items[0].paddr = txbuf_paddr +
+			    sizeof(txbuf->frags);
+	sg_items[0].len = sizeof(txbuf->htc_hdr) +
+			  sizeof(txbuf->cmd_hdr) +
+			  sizeof(txbuf->cmd_tx);
+
+	sg_items[1].transfer_id = 0;
+	sg_items[1].transfer_context = NULL;
+	sg_items[1].vaddr = msdu->data;
+	sg_items[1].paddr = skb_cb->paddr;
+	sg_items[1].len = prefetch_len;
+
+	res = ath10k_hif_tx_sg(htt->ar,
+			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+			       sg_items, ARRAY_SIZE(sg_items));
+	if (res)
+		goto err_unmap_msdu;
+
+	return 0;
+
+err_unmap_msdu:
+	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+err:
+	return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+	.htt_tx = ath10k_htt_tx_32,
+	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+	.htt_tx = ath10k_htt_tx_64,
+	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+
+	if (ar->hw_params.target_64bit)
+		htt->tx_ops = &htt_tx_ops_64;
+	else
+		htt->tx_ops = &htt_tx_ops_32;
+}

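ath10k_htt_fill_rx_desc_offset_32/64 above report the rx descriptor field positions to firmware as 4-byte word offsets via the desc_offset() macro. The standalone sketch below shows the same offsetof-divided-by-4 idea on a toy structure; the layout is invented and does not match struct htt_rx_desc.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the rx descriptor; field order and sizes are illustrative. */
struct toy_rx_desc {
	uint32_t attention;
	uint32_t frag_info;
	uint32_t mpdu_start[2];
	uint8_t  rx_hdr_status[64];
	uint8_t  msdu_payload[];
};

/* Same trick as the driver's desc_offset(): the byte offset divided by 4
 * gives the offset in 32-bit words, which is what the firmware expects.
 */
#define desc_offset(x) (offsetof(struct toy_rx_desc, x) / 4)

int main(void)
{
	printf("attention at word %zu\n", desc_offset(attention));
	printf("rx_hdr_status at word %zu\n", desc_offset(rx_hdr_status));
	printf("msdu_payload at word %zu\n", desc_offset(msdu_payload));
	return 0;
}
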
+ 1 - 1
drivers/net/wireless/ath/ath10k/hw.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 8 - 1
drivers/net/wireless/ath/ath10k/hw.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -561,6 +561,12 @@ struct ath10k_hw_params {
 	u32 num_peers;
 	u32 ast_skid_limit;
 	u32 num_wds_entries;
+
+	/* Targets supporting physical addressing capability above 32-bits */
+	bool target_64bit;
+
+	/* Target rx ring fill level */
+	u32 rx_ring_fill_level;
 };
 
 struct htt_rx_desc;
@@ -882,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define PCIE_INTR_CLR_ADDRESS			ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS			0x0010
+#define FW_RAM_CONFIG_ADDRESS			0x0018
 
 #define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
 

+ 4 - 3
drivers/net/wireless/ath/ath10k/mac.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -3597,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
 
 	switch (txpath) {
 	case ATH10K_MAC_TX_HTT:
-		ret = ath10k_htt_tx(htt, txmode, skb);
+		ret = htt->tx_ops->htt_tx(htt, txmode, skb);
 		break;
 	case ATH10K_MAC_TX_HTT_MGMT:
 		ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -8294,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
 	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
-		ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
 	}
 
 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

+ 1 - 1
drivers/net/wireless/ath/ath10k/mac.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 226 - 7
drivers/net/wireless/ath/ath10k/pci.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,7 @@
 
 #include "core.h"
 #include "debug.h"
+#include "coredump.h"
 
 #include "targaddrs.h"
 #include "bmi.h"
@@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT	0x5000
+
 static const struct pci_device_id ath10k_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 
 	spin_lock_bh(&ce->ce_lock);
-	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
 	spin_unlock_bh(&ce->ce_lock);
 	if (ret) {
 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 		nbytes = min_t(unsigned int, remaining_bytes,
 			       DIAG_TRANSFER_LIMIT);
 
-		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 		if (ret != 0)
 			goto done;
 
@@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
 		/* Set up to receive directly into Target(!) address */
-		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
+		ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
 		if (ret != 0)
 			goto done;
 
@@ -1461,6 +1467,218 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
 		crash_data->registers[i] = reg_dump_values[i];
 }
 
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+					  const struct ath10k_mem_region *mem_region,
+					  u8 *buf, size_t buf_len)
+{
+	const struct ath10k_mem_section *cur_section, *next_section;
+	unsigned int count, section_size, skip_size;
+	int ret, i, j;
+
+	if (!mem_region || !buf)
+		return 0;
+
+	if (mem_region->section_table.size < 0)
+		return 0;
+
+	cur_section = &mem_region->section_table.sections[0];
+
+	if (mem_region->start > cur_section->start) {
+		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+			    mem_region->start, cur_section->start);
+		return 0;
+	}
+
+	skip_size = cur_section->start - mem_region->start;
+
+	/* fill the gap between the region start address and the
+	 * first section
+	 */
+	for (i = 0; i < skip_size; i++) {
+		*buf = ATH10K_MAGIC_NOT_COPIED;
+		buf++;
+	}
+
+	count = 0;
+
+	for (i = 0; cur_section != NULL; i++) {
+		section_size = cur_section->end - cur_section->start;
+
+		if (section_size <= 0) {
+			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+				    cur_section->start,
+				    cur_section->end);
+			break;
+		}
+
+		if ((i + 1) == mem_region->section_table.size) {
+			/* last section */
+			next_section = NULL;
+			skip_size = 0;
+		} else {
+			next_section = cur_section + 1;
+
+			if (cur_section->end > next_section->start) {
+				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+					    next_section->start,
+					    cur_section->end);
+				break;
+			}
+
+			skip_size = next_section->start - cur_section->end;
+		}
+
+		if (buf_len < (skip_size + section_size)) {
+			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+			break;
+		}
+
+		buf_len -= skip_size + section_size;
+
+		/* read section to dest memory */
+		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+					       buf, section_size);
+		if (ret) {
+			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+				    cur_section->start, ret);
+			break;
+		}
+
+		buf += section_size;
+		count += section_size;
+
+		/* fill in the gap between this section and the next */
+		for (j = 0; j < skip_size; j++) {
+			*buf = ATH10K_MAGIC_NOT_COPIED;
+			buf++;
+		}
+
+		count += skip_size;
+
+		if (!next_section)
+			/* this was the last section */
+			break;
+
+		cur_section = next_section;
+	}
+
+	return count;
+}
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+	u32 val;
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+			   FW_RAM_CONFIG_ADDRESS, config);
+
+	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+				FW_RAM_CONFIG_ADDRESS);
+	if (val != config) {
+		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+			    val, config);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+				   struct ath10k_fw_crash_data *crash_data)
+{
+	const struct ath10k_hw_mem_layout *mem_layout;
+	const struct ath10k_mem_region *current_region;
+	struct ath10k_dump_ram_data_hdr *hdr;
+	u32 count, shift;
+	size_t buf_len;
+	int ret, i;
+	u8 *buf;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	if (!crash_data)
+		return;
+
+	mem_layout = ath10k_coredump_get_mem_layout(ar);
+	if (!mem_layout)
+		return;
+
+	current_region = &mem_layout->region_table.regions[0];
+
+	buf = crash_data->ramdump_buf;
+	buf_len = crash_data->ramdump_buf_len;
+
+	memset(buf, 0, buf_len);
+
+	for (i = 0; i < mem_layout->region_table.size; i++) {
+		count = 0;
+
+		if (current_region->len > buf_len) {
+			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
+				    current_region->name,
+				    current_region->len,
+				    buf_len);
+			break;
+		}
+
+		/* To get IRAM dump, the host driver needs to switch target
+		 * ram config from DRAM to IRAM.
+		 */
+		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+			shift = current_region->start >> 20;
+
+			ret = ath10k_pci_set_ram_config(ar, shift);
+			if (ret) {
+				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+					    current_region->name, ret);
+				break;
+			}
+		}
+
+		/* Reserve space for the header. */
+		hdr = (void *)buf;
+		buf += sizeof(*hdr);
+		buf_len -= sizeof(*hdr);
+
+		if (current_region->section_table.size > 0) {
+			/* Copy each section individually. */
+			count = ath10k_pci_dump_memory_section(ar,
+							       current_region,
+							       buf,
+							       current_region->len);
+		} else {
+			/* No individual memory sections defined so we can
+			 * copy the entire memory region.
+			 */
+			ret = ath10k_pci_diag_read_mem(ar,
+						       current_region->start,
+						       buf,
+						       current_region->len);
+			if (ret) {
+				ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+					    current_region->name, ret);
+				break;
+			}
+
+			count = current_region->len;
+		}
+
+		hdr->region_type = cpu_to_le32(current_region->type);
+		hdr->start = cpu_to_le32(current_region->start);
+		hdr->length = cpu_to_le32(count);
+
+		if (count == 0)
+			/* Note: the header remains, just with zero length. */
+			break;
+
+		buf += count;
+		buf_len -= count;
+
+		current_region++;
+	}
+}
+
 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 {
 	struct ath10k_fw_crash_data *crash_data;
@@ -1470,7 +1688,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 
 	ar->stats.fw_crash_counter++;
 
-	crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+	crash_data = ath10k_coredump_new(ar);
 
 	if (crash_data)
 		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@@ -1481,6 +1699,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 	ath10k_print_driver_info(ar);
 	ath10k_pci_dump_registers(ar, crash_data);
 	ath10k_ce_dump_registers(ar, crash_data);
+	ath10k_pci_dump_memory(ar, crash_data);
 
 	spin_unlock_bh(&ar->data_lock);
 
@@ -1858,7 +2077,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
 	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
 	if (ret) {
-		u32 unused_buffer;
+		dma_addr_t unused_buffer;
 		unsigned int unused_nbytes;
 		unsigned int unused_id;
 
@@ -1871,7 +2090,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
 err_resp:
 	if (resp) {
-		u32 unused_buffer;
+		dma_addr_t unused_buffer;
 
 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
 		dma_unmap_single(ar->dev, resp_paddr,

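ath10k_pci_dump_memory_section() above walks a region's section table, diag-reads each readable section into the coredump buffer and pads the unreadable gaps with ATH10K_MAGIC_NOT_COPIED so offsets in the dump still line up with target addresses. Here is a standalone sketch of that gap-filling idea only; the names are invented and a memset stands in for ath10k_pci_diag_read_mem().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC_NOT_COPIED 0xAA	/* stand-in for ATH10K_MAGIC_NOT_COPIED */

struct section { uint32_t start, end; };

static size_t dump_sections(const struct section *s, size_t n,
			    uint32_t region_start, uint8_t *buf, size_t buf_len)
{
	size_t count = 0;
	uint32_t pos = region_start;

	for (size_t i = 0; i < n; i++) {
		size_t gap = s[i].start - pos;	/* bytes we cannot read */
		size_t len = s[i].end - s[i].start;

		if (count + gap + len > buf_len)
			break;
		memset(buf + count, MAGIC_NOT_COPIED, gap);	/* mark the gap */
		memset(buf + count + gap, 0x11, len);		/* pretend diag read */
		count += gap + len;
		pos = s[i].end;
	}
	return count;
}

int main(void)
{
	struct section secs[] = { { 0x100, 0x140 }, { 0x180, 0x1c0 } };
	uint8_t buf[512];
	size_t n = dump_sections(secs, 2, 0x100, buf, sizeof(buf));

	printf("dumped %zu bytes, first gap byte = 0x%02x\n",
	       n, (unsigned int)buf[0x40]);
	return 0;
}
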
+ 1 - 1
drivers/net/wireless/ath/ath10k/pci.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 51 - 1
drivers/net/wireless/ath/ath10k/rx_desc.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -210,6 +210,10 @@ struct rx_frag_info {
 	u8 ring1_more_count;
 	u8 ring2_more_count;
 	u8 ring3_more_count;
+	u8 ring4_more_count;
+	u8 ring5_more_count;
+	u8 ring6_more_count;
+	u8 ring7_more_count;
 } __packed;
 
 /*
@@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
 	__le32 info2; /* %RX_MSDU_START_INFO2_ */
 } __packed;
 
+struct rx_msdu_start_wcn3990 {
+	__le32 info2; /* %RX_MSDU_START_INFO2_ */
+	__le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
 struct rx_msdu_start {
 	struct rx_msdu_start_common common;
 	union {
 		struct rx_msdu_start_qca99x0 qca99x0;
+		struct rx_msdu_start_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 
@@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
 	__le32 info2;
 } __packed;
 
+struct rx_msdu_end_wcn3990 {
+	__le32 ipv6_crc;
+	__le32 tcp_seq_no;
+	__le32 tcp_ack_no;
+	__le32 info1;
+	__le32 info2;
+	__le32 rule_indication_0;
+	__le32 rule_indication_1;
+	__le32 rule_indication_2;
+	__le32 rule_indication_3;
+} __packed;
+
 struct rx_msdu_end {
 	struct rx_msdu_end_common common;
 	union {
 		struct rx_msdu_end_qca99x0 qca99x0;
+		struct rx_msdu_end_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 
@@ -963,6 +986,12 @@ struct rx_pkt_end {
 	__le32 phy_timestamp_2;
 } __packed;
 
+struct rx_pkt_end_wcn3990 {
+	__le32 info0; /* %RX_PKT_END_INFO0_ */
+	__le64 phy_timestamp_1;
+	__le64 phy_timestamp_2;
+} __packed;
+
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK		0x00003fff
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB		0
 #define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK		0x1fff8000
@@ -998,6 +1027,12 @@ struct rx_location_info {
 	__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
 } __packed;
 
+struct rx_location_info_wcn3990 {
+	__le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+	__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+	__le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
 enum rx_phy_ppdu_end_info0 {
 	RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
 	RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
@@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+struct rx_ppdu_end_wcn3990 {
+	struct rx_pkt_end_wcn3990 rx_pkt_end;
+	struct rx_location_info_wcn3990 rx_location_info;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset;
+	__le32 reserved_info_0;
+	__le32 reserved_info_1;
+	__le32 rx_antenna_info;
+	__le32 rx_coex_info;
+	__le32 rx_mpdu_cnt_info;
+	__le64 phy_timestamp_tx;
+	__le32 rx_bb_length;
+} __packed;
+
 struct rx_ppdu_end {
 	struct rx_ppdu_end_common common;
 	union {
@@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
 		struct rx_ppdu_end_qca6174 qca6174;
 		struct rx_ppdu_end_qca99x0 qca99x0;
 		struct rx_ppdu_end_qca9984 qca9984;
+		struct rx_ppdu_end_wcn3990 wcn3990;
 	} __packed;
 } __packed;
 

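The rx_desc.h additions above follow the file's existing pattern: a common header plus a per-chip union member (qca99x0, qca9984, now wcn3990), so the same parsing code can address whichever variant the hardware produced. A minimal standalone sketch of that layout, with invented field names:

#include <stdint.h>
#include <stdio.h>

struct end_common { uint32_t info0; };
struct end_chip_a { uint32_t info1; };
struct end_chip_b { uint32_t info1; uint32_t rule_indication[4]; };

struct toy_msdu_end {
	struct end_common common;
	union {
		struct end_chip_a chip_a;
		struct end_chip_b chip_b;
	} u;
};

int main(void)
{
	/* The union sizes the tail for the largest variant; the parser picks
	 * the member matching the detected hardware.
	 */
	printf("toy_msdu_end is %zu bytes (chip_b tail dominates)\n",
	       sizeof(struct toy_msdu_end));
	return 0;
}
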
+ 1 - 1
drivers/net/wireless/ath/ath10k/spectral.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/spectral.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/swap.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/swap.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/targaddrs.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/testmode.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/testmode_i.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/thermal.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/thermal.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/trace.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/txrx.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/txrx.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/wmi-ops.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 3
drivers/net/wireless/ath/ath10k/wmi-tlv.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -2494,7 +2494,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 	void *ptr;
 	int len;
 	u32 buf_len = msdu->len;
-	u16 fc;
 	struct ath10k_vif *arvif;
 	dma_addr_t mgmt_frame_dma;
 	u32 vdev_id;
@@ -2503,7 +2502,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 		return ERR_PTR(-EINVAL);
 
 	hdr = (struct ieee80211_hdr *)msdu->data;
-	fc = le16_to_cpu(hdr->frame_control);
 	arvif = (void *)cb->vif->drv_priv;
 	vdev_id = arvif->vdev_id;
 

+ 1 - 1
drivers/net/wireless/ath/ath10k/wmi-tlv.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/wmi.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 7 - 2
drivers/net/wireless/ath/ath10k/wmi.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -196,6 +196,7 @@ enum wmi_service {
 	WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
 	WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
 	WMI_SERVICE_MGMT_TX_WMI,
+	WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -337,6 +338,7 @@ enum wmi_10_4_service {
 	WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
 	WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
 	WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+	WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -445,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
 	SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
 	SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
 	SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+	SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
 	default:
 		return NULL;
 	}
@@ -741,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
 	       WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
 	SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
 	       WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+	SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+	       WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
 }
 
 #undef SVCMAP
@@ -2924,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
 	__le32 max_tdls_concurrent_buffer_sta;
 };
 
-/* strucutre describing host memory chunk. */
+/* structure describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
 	__le32 req_id;

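The wmi.h hunk above adds WMI_SERVICE_TDLS_WIDER_BANDWIDTH and maps the corresponding 10.4 firmware service bit onto it through SVCMAP, which is what lets mac.c gate TDLS_WIDER_BW on the service bitmap. The sketch below shows only the remapping idea; the bit number and enum value are invented, not the real WMI numbering.

#include <stdint.h>
#include <stdio.h>

enum host_service { HOST_SERVICE_TDLS_WIDER_BANDWIDTH = 0, HOST_SERVICE_MAX };

#define FW_BIT_TDLS_WIDER_BANDWIDTH 69	/* illustrative firmware bit number */

/* Test one bit in the packed service bitmap reported by firmware. */
static int fw_bit_set(const uint32_t *map, int bit, int len_bits)
{
	if (bit >= len_bits)
		return 0;
	return (map[bit / 32] >> (bit % 32)) & 1;
}

int main(void)
{
	uint32_t fw_map[4] = { 0 };
	unsigned long host_map = 0;

	/* Pretend the firmware advertised the service. */
	fw_map[FW_BIT_TDLS_WIDER_BANDWIDTH / 32] |=
		1u << (FW_BIT_TDLS_WIDER_BANDWIDTH % 32);

	/* Remap the firmware-specific bit onto the host enum. */
	if (fw_bit_set(fw_map, FW_BIT_TDLS_WIDER_BANDWIDTH, 128))
		host_map |= 1ul << HOST_SERVICE_TDLS_WIDER_BANDWIDTH;

	printf("wider-bandwidth TDLS advertised: %s\n",
	       (host_map & 1ul) ? "yes" : "no");
	return 0;
}
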
+ 1 - 1
drivers/net/wireless/ath/ath10k/wow.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/ath10k/wow.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above

+ 1 - 1
drivers/net/wireless/ath/wcn36xx/smd.c

@@ -626,7 +626,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 
 	msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
 	msg_body.min_ch_time = 30;
-	msg_body.min_ch_time = 100;
+	msg_body.max_ch_time = 100;
 	msg_body.scan_hidden = 1;
 	memcpy(msg_body.mac, vif->addr, ETH_ALEN);
 	msg_body.p2p_search = vif->p2p;

+ 66 - 2
drivers/net/wireless/ath/wil6210/cfg80211.c

@@ -956,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
 				    struct cfg80211_chan_def *chandef)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil_to_wdev(wil);
 
-	wdev->preset_chandef = *chandef;
+	wil->monitor_chandef = *chandef;
 
 	return 0;
 }
@@ -1751,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
 	return 0;
 }
 
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+			      struct net_device *dev,
+			      struct cfg80211_sched_scan_request *request)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int i, rc;
+
+	wil_dbg_misc(wil,
+		     "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+		     request->n_ssids, request->ie_len, request->flags);
+	for (i = 0; i < request->n_ssids; i++) {
+		wil_dbg_misc(wil, "SSID[%d]:", i);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  request->ssids[i].ssid,
+				  request->ssids[i].ssid_len, true);
+	}
+	wil_dbg_misc(wil, "channels:");
+	for (i = 0; i < request->n_channels; i++)
+		wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+			     i == request->n_channels - 1 ? "\n" : "");
+	wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+		     request->n_match_sets, request->min_rssi_thold,
+		     request->delay);
+	for (i = 0; i < request->n_match_sets; i++) {
+		struct cfg80211_match_set *ms = &request->match_sets[i];
+
+		wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+			     i, ms->rssi_thold);
+		wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+				  ms->ssid.ssid,
+				  ms->ssid.ssid_len, true);
+	}
+	wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+	for (i = 0; i < request->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+		wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+			     i, sp->interval, sp->iterations);
+	}
+
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+	if (rc)
+		return rc;
+	return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+			     u64 reqid)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	rc = wmi_stop_sched_scan(wil);
+	/* The device returns an error if it thinks PNO is already stopped.
+	 * Ignore the return code so user space and the driver get back in sync.
+	 */
+	wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+	return 0;
+}
+
 static const struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1784,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
 	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
 	.suspend = wil_cfg80211_suspend,
 	.resume = wil_cfg80211_resume,
+	.sched_scan_start = wil_cfg80211_sched_scan_start,
+	.sched_scan_stop = wil_cfg80211_sched_scan_stop,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)

+ 0 - 1
drivers/net/wireless/ath/wil6210/debugfs.c

@@ -869,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
 
 	params.buf = frame;
 	params.len = len;
-	params.chan = wdev->preset_chandef.chan;
 
 	rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
 

+ 1 - 1
drivers/net/wireless/ath/wil6210/interrupt.c

@@ -565,7 +565,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
 	if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
 		return IRQ_NONE;
 
-	/* FIXME: IRQ mask debug */
+	/* IRQ mask debug */
 	if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
 		return IRQ_NONE;
 

+ 56 - 11
drivers/net/wireless/ath/wil6210/main.c

@@ -771,11 +771,11 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 {
 	struct wiphy *wiphy = wil_to_wiphy(wil);
+	int features;
 
 	wil->keep_radio_on_during_sleep =
-		wil->platform_ops.keep_radio_on_during_sleep &&
-		wil->platform_ops.keep_radio_on_during_sleep(
-			wil->platform_handle) &&
+		test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+			 wil->platform_capa) &&
 		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
 
 	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
@@ -785,6 +785,24 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 		wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	else
 		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+	if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+		wiphy->max_sched_scan_reqs = 1;
+		wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+		wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+		wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
+	if (wil->platform_ops.set_features) {
+		features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+				     wil->fw_capabilities) &&
+			    test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+				     wil->platform_capa)) ?
+			BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+		wil->platform_ops.set_features(wil->platform_handle, features);
+	}
 }
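
The set_features() hand-off above enables firmware control of the external reference clock only when both sides agree: the firmware advertises WMI_FW_CAPABILITY_REF_CLOCK_CONTROL and the platform reports WIL_PLATFORM_CAPA_EXT_CLK. A standalone sketch of that AND-of-two-bitmaps negotiation; the flag and function names here are made up for illustration:

#include <linux/bitops.h>

enum { DEMO_FW_REF_CLK_CONTROL };	/* firmware capability bit */
enum { DEMO_PLAT_EXT_CLK };		/* platform capability bit */
enum { DEMO_FEAT_FW_EXT_CLK_CONTROL };	/* negotiated feature bit */

static int demo_negotiate_features(const unsigned long *fw_capa,
				   const unsigned long *platform_capa)
{
	int features = 0;

	/* offer a feature only when firmware and platform both support it */
	if (test_bit(DEMO_FW_REF_CLK_CONTROL, fw_capa) &&
	    test_bit(DEMO_PLAT_EXT_CLK, platform_capa))
		features |= BIT(DEMO_FEAT_FW_EXT_CLK_CONTROL);

	return features;
}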
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -980,6 +998,7 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
 int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
 	int rc;
+	unsigned long status_flags = BIT(wil_status_resetting);
 
 	wil_dbg_misc(wil, "reset\n");
 
@@ -1000,6 +1019,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+	}
+
+	if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+		wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+		wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+	}
+
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
 					      WIL_PLATFORM_EVT_PRE_RESET);
@@ -1009,6 +1038,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	}
 
 	set_bit(wil_status_resetting, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the reset.
+		 * The reset will take place once crash dump collection is done.
+		 */
+		wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+		rc = -EBUSY;
+		goto out;
+	}
 
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1023,7 +1060,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
 	/* prevent NAPI from being scheduled and prevent wmi commands */
 	mutex_lock(&wil->wmi_mutex);
-	bitmap_zero(wil->status, wil_status_last);
+	if (test_bit(wil_status_suspending, wil->status))
+		status_flags |= BIT(wil_status_suspending);
+	bitmap_and(wil->status, wil->status, &status_flags,
+		   wil_status_last);
+	wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
 	mutex_unlock(&wil->wmi_mutex);
 
 	wil_mask_irq(wil);
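
Compared with the old bitmap_zero(), the bitmap_and() above wipes every status flag except wil_status_resetting (and wil_status_suspending when a suspend is in flight), so in-progress operations stay visible to the rest of the driver. A self-contained sketch of the pattern with made-up flag names; like the driver, it assumes the bitmap fits in a single unsigned long:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

enum { DEMO_RESETTING, DEMO_SUSPENDING, DEMO_CONNECTED, DEMO_LAST };

static void demo_clear_status_except(unsigned long *status, bool suspending)
{
	/* bits that must survive the reset */
	unsigned long keep = BIT(DEMO_RESETTING);

	if (suspending)
		keep |= BIT(DEMO_SUSPENDING);

	/* status &= keep, applied over the whole DEMO_LAST-bit bitmap */
	bitmap_and(status, status, &keep, DEMO_LAST);
}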
@@ -1041,14 +1082,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	wil_rx_fini(wil);
 	if (rc) {
 		wil_bl_crash_info(wil, true);
-		return rc;
+		goto out;
 	}
 
 	rc = wil_get_bl_info(wil);
 	if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
 		rc = 0;
 	if (rc)
-		return rc;
+		goto out;
 
 	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
@@ -1060,10 +1101,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 		/* Loading f/w from the file */
 		rc = wil_request_firmware(wil, wil->wil_fw_name, true);
 		if (rc)
-			return rc;
+			goto out;
 		rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
 		if (rc)
-			return rc;
+			goto out;
 
 		wil_pre_fw_config(wil);
 		wil_release_cpu(wil);
@@ -1075,6 +1116,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	reinit_completion(&wil->wmi_call);
 	reinit_completion(&wil->halp.comp);
 
+	clear_bit(wil_status_resetting, wil->status);
+
 	if (load_fw) {
 		wil_configure_interrupt_moderation(wil);
 		wil_unmask_irq(wil);
@@ -1108,6 +1151,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 	}
 
 	return rc;
+
+out:
+	clear_bit(wil_status_resetting, wil->status);
+	return rc;
 }
 
 void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1213,9 +1260,7 @@ int __wil_down(struct wil6210_priv *wil)
 	wil_abort_scan(wil, false);
 	mutex_unlock(&wil->p2p_wdev_mutex);
 
-	wil_reset(wil, false);
-
-	return 0;
+	return wil_reset(wil, false);
 }
 
 int wil_down(struct wil6210_priv *wil)

+ 1 - 1
drivers/net/wireless/ath/wil6210/netdev.c

@@ -150,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
 	wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
 	/* default monitor channel */
 	ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
-	cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+	cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
 
 	ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
 	if (!ndev) {

+ 33 - 28
drivers/net/wireless/ath/wil6210/pcie_bus.c

@@ -31,10 +31,8 @@ static bool ftm_mode;
 module_param(ftm_mode, bool, 0444);
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
-#ifdef CONFIG_PM
 static int wil6210_pm_notify(struct notifier_block *notify_block,
 			     unsigned long mode, void *unused);
-#endif /* CONFIG_PM */
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
@@ -43,9 +41,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
+	int platform_capa;
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
 	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
 			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
@@ -81,6 +81,14 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
 	wil_info(wil, "Board hardware is %s\n", wil->hw_name);
 
+	/* Get platform capabilities */
+	if (wil->platform_ops.get_capa) {
+		platform_capa =
+			wil->platform_ops.get_capa(wil->platform_handle);
+		memcpy(wil->platform_capa, &platform_capa,
+		       min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+	}
+
 	/* extract FW capabilities from file without loading the FW */
 	wil_request_firmware(wil, wil->wil_fw_name, false);
 	wil_refresh_fw_capabilities(wil);
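
get_capa() returns the platform capabilities packed into a plain int, while the driver keeps them in a DECLARE_BITMAP() so they can later be queried with test_bit(); the memcpy() above copies min(sizeof(bitmap), sizeof(int)) bytes to bridge the two representations. A hedged sketch of that conversion with illustrative names; it shares the driver's assumption that the capability word is small and the host is little-endian:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>

enum { DEMO_CAPA_RADIO_ON_IN_SUSPEND, DEMO_CAPA_EXT_CLK, DEMO_CAPA_MAX };

struct demo_dev {
	DECLARE_BITMAP(platform_capa, DEMO_CAPA_MAX);
};

static void demo_store_platform_capa(struct demo_dev *d, int capa_word)
{
	bitmap_zero(d->platform_capa, DEMO_CAPA_MAX);
	memcpy(d->platform_capa, &capa_word,
	       min(sizeof(d->platform_capa), sizeof(capa_word)));
}

static bool demo_has_ext_clk(struct demo_dev *d)
{
	return test_bit(DEMO_CAPA_EXT_CLK, d->platform_capa);
}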
@@ -206,6 +214,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
+	int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+	int i;
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
@@ -241,21 +251,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	/* rollback to err_plat */
 
-	/* device supports 48bit addresses */
-	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-	if (rc) {
-		dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	/* device supports >32bit addresses */
+	for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+		rc = dma_set_mask_and_coherent(dev,
+					       DMA_BIT_MASK(dma_addr_size[i]));
 		if (rc) {
-			dev_err(dev,
-				"dma_set_mask_and_coherent(32) failed: %d\n",
-				rc);
-			goto err_plat;
+			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+				dma_addr_size[i], rc);
+			continue;
 		}
-	} else {
-		wil->use_extended_dma_addr = 1;
+		dev_info(dev, "using dma mask %d\n", dma_addr_size[i]);
+		wil->dma_addr_size = dma_addr_size[i];
+		break;
 	}
 
+	if (wil->dma_addr_size == 0)
+		goto err_plat;
+
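
The loop above generalizes the old 48-or-32 fallback: it walks a descending list of candidate address widths and keeps the first mask the platform accepts, remembering the width so the vring code can narrow it later. The same probing pattern as a standalone helper (the function name is illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* returns the accepted DMA address width in bits, or 0 if none was accepted */
static int demo_pick_dma_mask(struct device *dev)
{
	static const int widths[] = { 48, 40, 32 };	/* descending order */
	int i;

	for (i = 0; i < ARRAY_SIZE(widths); i++) {
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(widths[i])))
			return widths[i];
		dev_warn(dev, "DMA mask %d rejected\n", widths[i]);
	}

	return 0;
}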
 	rc = pci_enable_device(pdev);
 	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
@@ -307,15 +319,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto bus_disable;
 	}
 
-#ifdef CONFIG_PM
-	wil->pm_notify.notifier_call = wil6210_pm_notify;
+	if (IS_ENABLED(CONFIG_PM))
+		wil->pm_notify.notifier_call = wil6210_pm_notify;
+
 	rc = register_pm_notifier(&wil->pm_notify);
 	if (rc)
 		/* Do not fail the driver initialization, as suspend can
 		 * be prevented in a later phase if needed
 		 */
 		wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM */
 
 	wil6210_debugfs_init(wil);
 
@@ -346,9 +358,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 
 	wil_dbg_misc(wil, "pcie_remove\n");
 
-#ifdef CONFIG_PM
 	unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM */
 
 	wil_pm_runtime_forbid(wil);
 
@@ -372,8 +382,6 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
-#ifdef CONFIG_PM
-
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
 	int rc = 0;
@@ -481,17 +489,17 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
 	return rc;
 }
 
-static int wil6210_pm_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
 {
 	return wil6210_suspend(dev, false);
 }
 
-static int wil6210_pm_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
 {
 	return wil6210_resume(dev, false);
 }
 
-static int wil6210_pm_runtime_idle(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
@@ -501,12 +509,12 @@ static int wil6210_pm_runtime_idle(struct device *dev)
 	return wil_can_suspend(wil, true);
 }
 
-static int wil6210_pm_runtime_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
 {
 	return wil6210_resume(dev, true);
 }
 
-static int wil6210_pm_runtime_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
@@ -518,15 +526,12 @@ static int wil6210_pm_runtime_suspend(struct device *dev)
 
 	return wil6210_suspend(dev, true);
 }
-#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
-#ifdef CONFIG_PM
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
 	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
 			   wil6210_pm_runtime_resume,
 			   wil6210_pm_runtime_idle)
-#endif /* CONFIG_PM */
 };
 
 static struct pci_driver wil6210_driver = {

+ 17 - 0
drivers/net/wireless/ath/wil6210/pm.c

@@ -145,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 
 	/* Prevent handling of new tx and wmi commands */
 	set_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
 	wil_update_net_queues_bh(wil, NULL, true);
 
 	if (!wil_is_tx_idle(wil)) {
@@ -255,6 +262,15 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 
 	wil_dbg_pm(wil, "suspend radio off\n");
 
+	set_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_collecting_dumps, wil->status)) {
+		/* Device is collecting a crash dump; cancel the suspend */
+		wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+		clear_bit(wil_status_suspending, wil->status);
+		wil->suspend_stats.rejected_by_host++;
+		return -EBUSY;
+	}
+
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
 		rc = wil_down(wil);
@@ -281,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 	set_bit(wil_status_suspended, wil->status);
 
 out:
+	clear_bit(wil_status_suspending, wil->status);
 	wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
 
 	return rc;

+ 6 - 5
drivers/net/wireless/ath/wil6210/pmc.c

@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses, switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 			&pmc->pring_pa,
 			GFP_KERNEL);
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	wil_dbg_misc(wil,
 		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",

+ 7 - 7
drivers/net/wireless/ath/wil6210/txrx.c

@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses, switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 		return -ENOMEM;
 	}
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	/* initially, all descriptors are SW owned
 	 * For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 				       struct sk_buff *skb)
 {
-	struct wireless_dev *wdev = wil->wdev;
 	struct wil6210_rtap {
 		struct ieee80211_radiotap_header rthdr;
 		/* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 	int rtap_len = sizeof(struct wil6210_rtap);
 	int phy_length = 0; /* phy info header size, bytes */
 	static char phy_data[128];
-	struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+	struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 	if (rtap_include_phy_info) {
 		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);

+ 13 - 4
drivers/net/wireless/ath/wil6210/wil6210.h

@@ -161,6 +161,10 @@ struct RGF_ICR {
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
 	#define BIT_USER_OOB_R2_MODE		BIT(30)
+#define RGF_USER_USAGE_8		(0x880020)
+	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
+	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
+	#define BIT_USER_EXT_CLK		BIT(2)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
@@ -435,12 +439,13 @@ enum { /* for wil6210_priv.status */
 	wil_status_fwconnected,
 	wil_status_dontscan,
 	wil_status_mbox_ready, /* MBOX structures ready */
-	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+	wil_status_irqen, /* interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
 	wil_status_suspending, /* suspend in progress */
 	wil_status_suspended, /* suspend completed, device is suspended */
 	wil_status_resuming, /* resume in progress */
+	wil_status_collecting_dumps, /* crashdump collection in progress */
 	wil_status_last /* keep last */
 };
 
@@ -643,12 +648,14 @@ struct wil6210_priv {
 	const char *wil_fw_name;
 	DECLARE_BITMAP(hw_capabilities, hw_capability_last);
 	DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+	DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
 	u8 n_mids; /* number of additional MIDs as reported by FW */
 	u32 recovery_count; /* num of FW recovery attempts in a short time */
 	u32 recovery_state; /* FW recovery state machine */
 	unsigned long last_fw_recovery; /* jiffies of last fw recovery */
 	wait_queue_head_t wq; /* for all wait_event() use */
 	/* profile */
+	struct cfg80211_chan_def monitor_chandef;
 	u32 monitor_flags;
 	u32 privacy; /* secure connection? */
 	u8 hidden_ssid; /* relevant in AP mode */
@@ -704,7 +711,7 @@ struct wil6210_priv {
 	struct wil_sta_info sta[WIL6210_MAX_CID];
 	int bcast_vring;
 	u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
-	bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+	u32 dma_addr_size; /* indicates dma addr size */
 	/* scan */
 	struct cfg80211_scan_request *scan_request;
 
@@ -742,9 +749,7 @@ struct wil6210_priv {
 
 	int fw_calib_result;
 
-#ifdef CONFIG_PM
 	struct notifier_block pm_notify;
-#endif /* CONFIG_PM */
 
 	bool suspend_resp_rcvd;
 	bool suspend_resp_comp;
@@ -1032,4 +1037,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
 void wil6210_set_halp(struct wil6210_priv *wil);
 void wil6210_clear_halp(struct wil6210_priv *wil);
 
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */

+ 11 - 0
drivers/net/wireless/ath/wil6210/wil_crash_dump.c

@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 		return -EINVAL;
 	}
 
+	set_bit(wil_status_collecting_dumps, wil->status);
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_resetting, wil->status)) {
+		wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+		clear_bit(wil_status_collecting_dumps, wil->status);
+		return -EINVAL;
+	}
+
 	/* copy to crash dump area */
 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
 		map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
 				     (const void __iomem * __force)data, len);
 	}
 
+	clear_bit(wil_status_collecting_dumps, wil->status);
+
 	return 0;
 }
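
The new flag works because both paths use the same set-then-test order: the dump collector sets wil_status_collecting_dumps and then looks for suspend/reset, while wil_reset() and the suspend handlers set their own flag and then look for collecting_dumps. Whichever side arrives second sees the other's bit and backs off. A minimal sketch of that handshake with hypothetical flag names; as in the driver, no extra locking is added around the two bit operations:

#include <linux/bitops.h>
#include <linux/errno.h>

enum { DEMO_RESETTING, DEMO_COLLECTING_DUMPS, DEMO_LAST };

static int demo_start_dump(unsigned long *status)
{
	set_bit(DEMO_COLLECTING_DUMPS, status);		/* announce first ... */
	if (test_bit(DEMO_RESETTING, status)) {		/* ... then look */
		clear_bit(DEMO_COLLECTING_DUMPS, status);
		return -EINVAL;
	}
	/* safe to read device memory here */
	clear_bit(DEMO_COLLECTING_DUMPS, status);
	return 0;
}

static int demo_start_reset(unsigned long *status)
{
	set_bit(DEMO_RESETTING, status);
	if (test_bit(DEMO_COLLECTING_DUMPS, status)) {
		clear_bit(DEMO_RESETTING, status);
		return -EBUSY;
	}
	/* proceed with the reset, then clear the flag */
	clear_bit(DEMO_RESETTING, status);
	return 0;
}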
 

+ 14 - 1
drivers/net/wireless/ath/wil6210/wil_platform.h

@@ -27,6 +27,18 @@ enum wil_platform_event {
 	WIL_PLATFORM_EVT_POST_SUSPEND = 4,
 };
 
+enum wil_platform_features {
+	WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+	WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+	WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+	WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+	WIL_PLATFORM_CAPA_EXT_CLK = 2,
+	WIL_PLATFORM_CAPA_MAX,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
 	int (*resume)(void *handle, bool device_powered_on);
 	void (*uninit)(void *handle);
 	int (*notify)(void *handle, enum wil_platform_event evt);
-	bool (*keep_radio_on_during_sleep)(void *handle);
+	int (*get_capa)(void *handle);
+	void (*set_features)(void *handle, int features);
 };
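
The single keep_radio_on_during_sleep() callback is replaced by a more general pair: get_capa() lets the platform module report its capability bits once at probe time, and set_features() lets the driver push negotiated features back. A hypothetical platform-side implementation might look like the following; nothing here corresponds to a real platform driver:

#include <linux/bitops.h>
#include <linux/printk.h>
#include "wil_platform.h"

static int demo_platform_get_capa(void *handle)
{
	/* this board keeps the radio powered in suspend and feeds an
	 * external reference clock to the chip
	 */
	return BIT(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND) |
	       BIT(WIL_PLATFORM_CAPA_EXT_CLK);
}

static void demo_platform_set_features(void *handle, int features)
{
	if (features & BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
		pr_debug("demo platform: FW owns the reference clock\n");
}

static const struct wil_platform_ops demo_platform_ops = {
	.get_capa = demo_platform_get_capa,
	.set_features = demo_platform_set_features,
};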
 
 /**

+ 238 - 3
drivers/net/wireless/ath/wil6210/wmi.c

@@ -38,6 +38,7 @@ MODULE_PARM_DESC(led_id,
 		 " 60G device led enablement. Set the led ID (0-2) to enable");
 
 #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
 
 /**
  * WMI event receiving - theory of operations
@@ -314,6 +315,10 @@ static const char *cmdid2name(u16 cmdid)
 		return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
 	case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
 		return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+	case WMI_START_SCHED_SCAN_CMDID:
+		return "WMI_START_SCHED_SCAN_CMD";
+	case WMI_STOP_SCHED_SCAN_CMDID:
+		return "WMI_STOP_SCHED_SCAN_CMD";
 	default:
 		return "Untracked CMD";
 	}
@@ -428,6 +433,12 @@ static const char *eventid2name(u16 eventid)
 		return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
 	case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
 		return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+	case WMI_START_SCHED_SCAN_EVENTID:
+		return "WMI_START_SCHED_SCAN_EVENT";
+	case WMI_STOP_SCHED_SCAN_EVENTID:
+		return "WMI_STOP_SCHED_SCAN_EVENT";
+	case WMI_SCHED_SCAN_RESULT_EVENTID:
+		return "WMI_SCHED_SCAN_RESULT_EVENT";
 	default:
 		return "Untracked EVENT";
 	}
@@ -802,8 +813,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 		}
 	}
 
-	/* FIXME FW can transmit only ucast frames to peer */
-	/* FIXME real ring_id instead of hard coded 0 */
 	ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
 	wil->sta[evt->cid].status = wil_sta_conn_pending;
 
@@ -1066,6 +1075,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 	spin_unlock_bh(&sta->tid_rx_lock);
 }
 
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	struct wmi_sched_scan_result_event *data = d;
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+	struct ieee80211_mgmt *rx_mgmt_frame =
+		(struct ieee80211_mgmt *)data->payload;
+	int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+	int ch_no;
+	u32 freq;
+	struct ieee80211_channel *channel;
+	s32 signal;
+	__le16 fc;
+	u32 d_len;
+	struct cfg80211_bss *bss;
+
+	if (flen < 0) {
+		wil_err(wil, "sched scan result event too short, len %d\n",
+			len);
+		return;
+	}
+
+	d_len = le32_to_cpu(data->info.len);
+	if (d_len != flen) {
+		wil_err(wil,
+			"sched scan result length mismatch, d_len %d should be %d\n",
+			d_len, flen);
+		return;
+	}
+
+	fc = rx_mgmt_frame->frame_control;
+	if (!ieee80211_is_probe_resp(fc)) {
+		wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+			le16_to_cpu(fc));
+		return;
+	}
+
+	ch_no = data->info.channel + 1;
+	freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+	channel = ieee80211_get_channel(wiphy, freq);
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		signal = 100 * data->info.rssi;
+	else
+		signal = data->info.sqi;
+
+	wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+		    data->info.channel, data->info.mcs, data->info.rssi);
+	wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+		    d_len, data->info.qid, data->info.mid, data->info.cid);
+	wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+			 d_len, true);
+
+	if (!channel) {
+		wil_err(wil, "Frame on unsupported channel\n");
+		return;
+	}
+
+	bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+					d_len, signal, GFP_KERNEL);
+	if (bss) {
+		wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+		cfg80211_put_bss(wiphy, bss);
+	} else {
+		wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+	}
+
+	cfg80211_sched_scan_results(wiphy, 0);
+}
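
One detail in the handler above is the unit of the signal passed to cfg80211_inform_bss_frame(): when the firmware reports a real RSSI (WMI_FW_CAPABILITY_RSSI_REPORTING, wiphy signal_type CFG80211_SIGNAL_TYPE_MBM) the dBm value is scaled by 100 into mBm, otherwise the raw SQI is reported as an unspecified-unit value. A tiny helper capturing that convention (hypothetical, mirroring the logic above):

#include <linux/types.h>

/* convert FW-reported link quality to what cfg80211 expects:
 * mBm (dBm * 100) when a real RSSI is available, raw SQI otherwise
 */
static s32 demo_bss_signal(bool have_rssi, s8 rssi_dbm, u8 sqi)
{
	return have_rssi ? 100 * rssi_dbm : sqi;
}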
+
 /**
  * Some events are ignored for purpose; and need not be interpreted as
  * "unhandled events"
@@ -1093,6 +1171,7 @@ static const struct {
 	{WMI_DELBA_EVENTID,		wmi_evt_delba},
 	{WMI_VRING_EN_EVENTID,		wmi_evt_vring_en},
 	{WMI_DATA_PORT_OPEN_EVENTID,		wmi_evt_ignore},
+	{WMI_SCHED_SCAN_RESULT_EVENTID,		wmi_evt_sched_scan_result},
 };
 
 /*
@@ -1703,7 +1782,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
 	int rc;
 
 	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
-		struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+		struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
 		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
 		if (ch)
@@ -2284,3 +2363,159 @@ out:
 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 	return rc;
 }
+
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_ssid *ssids, int n_ssids,
+			 struct cfg80211_match_set *match_sets,
+			 int n_match_sets)
+{
+	int i;
+
+	if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+		wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+			    n_match_sets, WMI_MAX_PNO_SSID_NUM);
+		n_match_sets = WMI_MAX_PNO_SSID_NUM;
+	}
+	cmd->num_of_ssids = n_match_sets;
+
+	for (i = 0; i < n_match_sets; i++) {
+		struct wmi_sched_scan_ssid_match *wmi_match =
+			&cmd->ssid_for_match[i];
+		struct cfg80211_match_set *cfg_match = &match_sets[i];
+		int j;
+
+		wmi_match->ssid_len = cfg_match->ssid.ssid_len;
+		memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+		       min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
+		wmi_match->rssi_threshold = S8_MIN;
+		if (cfg_match->rssi_thold >= S8_MIN &&
+		    cfg_match->rssi_thold <= S8_MAX)
+			wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+		for (j = 0; j < n_ssids; j++)
+			if (wmi_match->ssid_len == ssids[j].ssid_len &&
+			    memcmp(wmi_match->ssid, ssids[j].ssid,
+				   wmi_match->ssid_len) == 0)
+				wmi_match->add_ssid_to_probe = true;
+	}
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+			    struct wmi_start_sched_scan_cmd *cmd,
+			    u32 n_channels,
+			    struct ieee80211_channel **channels)
+{
+	int i;
+
+	if (n_channels > WMI_MAX_CHANNEL_NUM) {
+		wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+			    n_channels, WMI_MAX_CHANNEL_NUM);
+		n_channels = WMI_MAX_CHANNEL_NUM;
+	}
+	cmd->num_of_channels = n_channels;
+
+	for (i = 0; i < n_channels; i++) {
+		struct ieee80211_channel *cfg_chan = channels[i];
+
+		cmd->channel_list[i] = cfg_chan->hw_value - 1;
+	}
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+			 struct wmi_start_sched_scan_cmd *cmd,
+			 struct cfg80211_sched_scan_plan *scan_plans,
+			 int n_scan_plans)
+{
+	int i;
+
+	if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+		wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+			    n_scan_plans, WMI_MAX_PLANS_NUM);
+		n_scan_plans = WMI_MAX_PLANS_NUM;
+	}
+
+	for (i = 0; i < n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+		cmd->scan_plans[i].interval_sec =
+			cpu_to_le16(cfg_plan->interval);
+		cmd->scan_plans[i].num_of_iterations =
+			cpu_to_le16(cfg_plan->iterations);
+	}
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+			 struct cfg80211_sched_scan_request *request)
+{
+	int rc;
+	struct wmi_start_sched_scan_cmd cmd = {
+		.min_rssi_threshold = S8_MIN,
+		.initial_delay_sec = cpu_to_le16(request->delay),
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_start_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	if (request->min_rssi_thold >= S8_MIN &&
+	    request->min_rssi_thold <= S8_MAX)
+		cmd.min_rssi_threshold = request->min_rssi_thold;
+
+	wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+				 request->match_sets, request->n_match_sets);
+	wmi_sched_scan_set_channels(wil, &cmd,
+				    request->n_channels, request->channels);
+	wmi_sched_scan_set_plans(wil, &cmd,
+				 request->scan_plans, request->n_scan_plans);
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+		      WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "start sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_stop_sched_scan_event evt;
+	} __packed reply;
+
+	if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+		return -ENOTSUPP;
+
+	reply.evt.result = WMI_PNO_REJECT;
+
+	rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+		      WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+		      WIL_WMI_CALL_GENERAL_TO_MS);
+	if (rc)
+		return rc;
+
+	if (reply.evt.result != WMI_PNO_SUCCESS) {
+		wil_err(wil, "stop sched scan failed, result %d\n",
+			reply.evt.result);
+		return -EINVAL;
+	}
+
+	return 0;
+}
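
A small but easy-to-miss point in the command builders above is byte order: cfg80211 hands over plan intervals and iteration counts in host order, while the WMI command travels to the firmware little-endian, hence the cpu_to_le16() conversions. A self-contained sketch of that conversion step; the struct mirrors wmi_sched_scan_plan but is named hypothetically:

#include <asm/byteorder.h>
#include <linux/types.h>

struct demo_scan_plan {
	__le16 interval_sec;
	__le16 num_of_iterations;
} __packed;

/* fill one on-wire scan plan from host-order cfg80211 values */
static void demo_fill_plan(struct demo_scan_plan *dst,
			   u32 interval_sec, u32 iterations)
{
	dst->interval_sec = cpu_to_le16(interval_sec);
	dst->num_of_iterations = cpu_to_le16(iterations);
}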

+ 82 - 18
drivers/net/wireless/ath/wil6210/wmi.h

@@ -71,6 +71,8 @@ enum wmi_fw_capability {
 	WMI_FW_CAPABILITY_RSSI_REPORTING		= 12,
 	WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE		= 13,
 	WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP	= 14,
+	WMI_FW_CAPABILITY_PNO				= 15,
+	WMI_FW_CAPABILITY_REF_CLOCK_CONTROL		= 18,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -87,6 +89,8 @@ enum wmi_command_id {
 	WMI_CONNECT_CMDID				= 0x01,
 	WMI_DISCONNECT_CMDID				= 0x03,
 	WMI_DISCONNECT_STA_CMDID			= 0x04,
+	WMI_START_SCHED_SCAN_CMDID			= 0x05,
+	WMI_STOP_SCHED_SCAN_CMDID			= 0x06,
 	WMI_START_SCAN_CMDID				= 0x07,
 	WMI_SET_BSS_FILTER_CMDID			= 0x09,
 	WMI_SET_PROBED_SSID_CMDID			= 0x0A,
@@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
 	} channel_list[0];
 } __packed;
 
+#define WMI_MAX_PNO_SSID_NUM	(16)
+#define WMI_MAX_CHANNEL_NUM	(6)
+#define WMI_MAX_PLANS_NUM	(2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+	u8 ssid_len;
+	u8 ssid[WMI_MAX_SSID_LEN];
+	s8 rssi_threshold;
+	/* boolean */
+	u8 add_ssid_to_probe;
+	u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+	__le16 interval_sec;
+	__le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+	struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+	u8 num_of_ssids;
+	s8 min_rssi_threshold;
+	u8 channel_list[WMI_MAX_CHANNEL_NUM];
+	u8 num_of_channels;
+	u8 reserved;
+	__le16 initial_delay_sec;
+	struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
 /* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX	(3)
 
@@ -1238,6 +1274,9 @@ enum wmi_event_id {
 	WMI_READY_EVENTID				= 0x1001,
 	WMI_CONNECT_EVENTID				= 0x1002,
 	WMI_DISCONNECT_EVENTID				= 0x1003,
+	WMI_START_SCHED_SCAN_EVENTID			= 0x1005,
+	WMI_STOP_SCHED_SCAN_EVENTID			= 0x1006,
+	WMI_SCHED_SCAN_RESULT_EVENTID			= 0x1007,
 	WMI_SCAN_COMPLETE_EVENTID			= 0x100A,
 	WMI_REPORT_STATISTICS_EVENTID			= 0x100B,
 	WMI_RD_MEM_RSP_EVENTID				= 0x1800,
@@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
 	__le32 status;
 } __packed;
 
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+	u8 mcs;
+	s8 rssi;
+	u8 range;
+	u8 sqi;
+	__le16 stype;
+	__le16 status;
+	__le32 len;
+	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+	u8 qid;
+	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+	u8 mid;
+	u8 cid;
+	/* From Radio MNGR */
+	u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+	WMI_PNO_SUCCESS			= 0x00,
+	WMI_PNO_REJECT			= 0x01,
+	WMI_PNO_INVALID_PARAMETERS	= 0x02,
+	WMI_PNO_NOT_ENABLED		= 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+	/* pno_result */
+	u8 result;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+	struct wmi_rx_mgmt_info info;
+	u8 payload[0];
+} __packed;
+
 /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
 enum wmi_acs_info_bitmask {
 	WMI_ACS_INFO_BITMASK_BEACON_FOUND	= 0x01,
@@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
-	u8 mcs;
-	s8 rssi;
-	u8 range;
-	u8 sqi;
-	__le16 stype;
-	__le16 status;
-	__le32 len;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 qid;
-	/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-	u8 mid;
-	u8 cid;
-	/* From Radio MNGR */
-	u8 channel;
-} __packed;
-
 /* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
 struct wmi_rf_xpm_read_result_event {
 	/* enum wmi_fw_status_e - success=0 or fail=1 */